Update apex-3.4 documentation from master to include security changes and 
development best practices.


Project: http://git-wip-us.apache.org/repos/asf/apex-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/apex-site/commit/21e76a00
Tree: http://git-wip-us.apache.org/repos/asf/apex-site/tree/21e76a00
Diff: http://git-wip-us.apache.org/repos/asf/apex-site/diff/21e76a00

Branch: refs/heads/asf-site
Commit: 21e76a006707cf1871eacbc5ab99eb17cf8e3d2b
Parents: 974bace
Author: Thomas Weise <[email protected]>
Authored: Tue Sep 6 19:06:26 2016 -0700
Committer: Thomas Weise <[email protected]>
Committed: Tue Sep 6 19:06:26 2016 -0700

----------------------------------------------------------------------
 docs/apex-3.4/__init__.pyc                      | Bin 166 -> 163 bytes
 docs/apex-3.4/apex_cli/index.html               |  11 +-
 docs/apex-3.4/apex_development_setup/index.html |  17 +-
 .../apex-3.4/application_development/index.html |  15 +-
 docs/apex-3.4/application_packages/index.html   |   7 +
 docs/apex-3.4/autometrics/index.html            |  13 +-
 docs/apex-3.4/compatibility/index.html          |   7 +
 .../development_best_practices/index.html       | 376 +++++++++++++++++++
 docs/apex-3.4/images/security/image03.png       | Bin 0 -> 18677 bytes
 docs/apex-3.4/index.html                        |  11 +-
 docs/apex-3.4/license/highlight.js/LICENSE      |  24 --
 docs/apex-3.4/main.html                         |  10 +
 docs/apex-3.4/mkdocs/js/lunr-0.5.7.min.js       |   7 +
 docs/apex-3.4/mkdocs/search_index.json          | 126 ++++++-
 docs/apex-3.4/operator_development/index.html   |   9 +-
 docs/apex-3.4/search.html                       |   7 +
 docs/apex-3.4/security/index.html               | 129 +++++--
 docs/apex-3.4/sitemap.xml                       |  24 +-
 18 files changed, 705 insertions(+), 88 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/__init__.pyc
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/__init__.pyc b/docs/apex-3.4/__init__.pyc
index f478a23..5d767d8 100644
Binary files a/docs/apex-3.4/__init__.pyc and b/docs/apex-3.4/__init__.pyc 
differ

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/apex_cli/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/apex_cli/index.html 
b/docs/apex-3.4/apex_cli/index.html
index f6c491e..c45aec1 100644
--- a/docs/apex-3.4/apex_cli/index.html
+++ b/docs/apex-3.4/apex_cli/index.html
@@ -102,6 +102,13 @@
     </li>
 
         
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../development_best_practices/">Best Practices</a>
+        
+    </li>
+
+        
     </ul>
 <li>
           
@@ -436,7 +443,7 @@ they must be part of the jar files that were deployed at 
application launch time
         <a href="../security/" class="btn btn-neutral float-right" 
title="Security">Next <span class="icon icon-circle-arrow-right"></span></a>
       
       
-        <a href="../autometrics/" class="btn btn-neutral" title="AutoMetric 
API"><span class="icon icon-circle-arrow-left"></span> Previous</a>
+        <a href="../development_best_practices/" class="btn btn-neutral" 
title="Best Practices"><span class="icon icon-circle-arrow-left"></span> 
Previous</a>
       
     </div>
   
@@ -462,7 +469,7 @@ they must be part of the jar files that were deployed at 
application launch time
     <span class="rst-current-version" data-toggle="rst-current-version">
       
       
-        <span><a href="../autometrics/" style="color: #fcfcfc;">&laquo; 
Previous</a></span>
+        <span><a href="../development_best_practices/" style="color: 
#fcfcfc;">&laquo; Previous</a></span>
       
       
         <span style="margin-left: 15px"><a href="../security/" style="color: 
#fcfcfc">Next &raquo;</a></span>

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/apex_development_setup/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/apex_development_setup/index.html 
b/docs/apex-3.4/apex_development_setup/index.html
index 75a7891..1af03d1 100644
--- a/docs/apex-3.4/apex_development_setup/index.html
+++ b/docs/apex-3.4/apex_development_setup/index.html
@@ -119,6 +119,13 @@
     </li>
 
         
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../development_best_practices/">Best Practices</a>
+        
+    </li>
+
+        
     </ul>
 <li>
           
@@ -306,22 +313,22 @@ project properties at <em>Properties &#8658; Run/Debug 
Settings &#8658; Applicat
 <ol>
 <li>
 <p>Check out the source code repositories:</p>
-<pre><code>git clone https://github.com/apache/incubator-apex-core
-git clone https://github.com/apache/incubator-apex-malhar
+<pre><code>git clone https://github.com/apache/apex-core
+git clone https://github.com/apache/apex-malhar
 </code></pre>
 </li>
 <li>
 <p>Switch to the appropriate release branch and build each repository:</p>
-<pre><code>cd incubator-apex-core
+<pre><code>cd apex-core
 mvn clean install -DskipTests
 
-cd incubator-apex-malhar
+cd apex-malhar
 mvn clean install -DskipTests
 </code></pre>
 </li>
 </ol>
 <p>The <code>install</code> argument to the <code>mvn</code> command installs 
resources from each project to your local maven repository (typically 
<code>.m2/repository</code> under your home directory), and 
<strong>not</strong> to the system directories, so Administrator privileges are 
not required. The  <code>-DskipTests</code> argument skips running unit tests 
since they take a long time. If this is a first-time installation, it might 
take several minutes to complete because maven will download a number of 
associated plugins.</p>
-<p>After the build completes, you should see the demo application package 
files in the target directory under each demo subdirectory in 
<code>incubator-apex-malhar/demos</code>.</p>
+<p>After the build completes, you should see the demo application package 
files in the target directory under each demo subdirectory in 
<code>apex-malhar/demos</code>.</p>
 <h2 id="sandbox">Sandbox</h2>
 <p>To jump start development with an Apache Hadoop single node cluster, <a 
href="https://www.datatorrent.com/download";>DataTorrent Sandbox</a> powered by 
VirtualBox is available on Windows, Linux, or Mac platforms.  The sandbox is 
configured by default to run with 6GB RAM; if your development machine has 16GB 
or more, you can increase the sandbox RAM to 8GB or more using the VirtualBox 
console.  This will yield better performance and support larger applications.  
The advantage of developing in the sandbox is that most of the tools (e.g. 
<em>jdk</em>, <em>git</em>, <em>maven</em>), Hadoop YARN and HDFS, and a 
distribution of Apache Apex and DataTorrent RTS are pre-installed.  The 
disadvantage is that the sandbox is a memory-limited environment, and requires 
settings changes and restarts to adjust memory available for development and 
testing.</p>
               

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/application_development/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/application_development/index.html 
b/docs/apex-3.4/application_development/index.html
index 8c8f184..20d8e2e 100644
--- a/docs/apex-3.4/application_development/index.html
+++ b/docs/apex-3.4/application_development/index.html
@@ -187,6 +187,13 @@
     </li>
 
         
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../development_best_practices/">Best Practices</a>
+        
+    </li>
+
+        
     </ul>
 <li>
           
@@ -278,7 +285,7 @@ operators to the <a 
href="../operator_development/">Operator Development Guide</
 <h1 id="running-a-test-application">Running A Test Application</h1>
 <p>If you are starting with the Apex platform for the first time,
 it can be informative to launch an existing application and see it run.
-One of the simplest examples provided in <a 
href="https://github.com/apache/incubator-apex-malhar";>Apex-Malhar 
repository</a> is a Pi demo application,
+One of the simplest examples provided in <a 
href="https://github.com/apache/apex-malhar";>Apex-Malhar repository</a> is a Pi 
demo application,
 which computes the value of PI using random numbers.  After <a 
href="../apex_development_setup/">setting up development environment</a>
 Pi demo can be launched as follows:</p>
 <ol>
@@ -907,7 +914,7 @@ project name “Malhar” as part of our efforts to foster 
community
 innovation. These operators can be used in a DAG as is, while others
 have properties that can be set to specify the
 desired computation. Those interested in details, should refer to
-<a href="https://github.com/apache/incubator-apex-malhar";>Apex-Malhar operator 
library</a>.</p>
+<a href="https://github.com/apache/apex-malhar";>Apex-Malhar operator 
library</a>.</p>
 <p>The platform is a Hadoop YARN native
 application. It runs in a Hadoop cluster just like any
 other YARN application (MapReduce etc.) and is designed to seamlessly
@@ -1281,7 +1288,7 @@ DAG in local mode within the IDE.</p>
 <li>The <code>operators</code> field is the list of operators the application 
has. You can specifiy the name, the Java class, and the properties of each 
operator here.</li>
 <li>The <code>streams</code> field is the list of streams that connects the 
operators together to form the DAG. Each stream consists of the stream name, 
the operator and port that it connects from, and the list of operators and 
ports that it connects to. Note that you can connect from <em>one</em> output 
port of an operator to <em>multiple</em> different input ports of different 
operators.</li>
 </ul>
-<p>In Apex Malhar, there is an <a 
href="https://github.com/apache/incubator-apex-malhar/blob/master/demos/pi/src/main/resources/app/PiJsonDemo.json";>example</a>
 in the Pi Demo doing just that.</p>
+<p>In Apex Malhar, there is an <a 
href="https://github.com/apache/apex-malhar/blob/master/demos/pi/src/main/resources/app/PiJsonDemo.json";>example</a>
 in the Pi Demo doing just that.</p>
 <h3 id="properties-file-dag-specification">Properties File DAG 
Specification</h3>
 <p>The platform also supports specification of a DAG via a properties
 file. The aim here to make it easy for tools to create and run an
@@ -2625,7 +2632,7 @@ details refer to  <a 
href="http://docs.datatorrent.com/configuration/";>Configura
 <hr />
 <h1 id="demos">Demos</h1>
 <p>The source code for the demos is available in the open-source
-<a href="https://github.com/apache/incubator-apex-malhar";>Apache Apex-Malhar 
repository</a>.
+<a href="https://github.com/apache/apex-malhar";>Apache Apex-Malhar 
repository</a>.
 All of these do computations in real-time. Developers are encouraged to
 review them as they use various features of the platform and provide an
 opportunity for quick learning.</p>

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/application_packages/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/application_packages/index.html 
b/docs/apex-3.4/application_packages/index.html
index 654c764..d4aff60 100644
--- a/docs/apex-3.4/application_packages/index.html
+++ b/docs/apex-3.4/application_packages/index.html
@@ -129,6 +129,13 @@
     </li>
 
         
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../development_best_practices/">Best Practices</a>
+        
+    </li>
+
+        
     </ul>
 <li>
           

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/autometrics/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/autometrics/index.html 
b/docs/apex-3.4/autometrics/index.html
index 5d01dec..4712619 100644
--- a/docs/apex-3.4/autometrics/index.html
+++ b/docs/apex-3.4/autometrics/index.html
@@ -128,6 +128,13 @@
     </li>
 
         
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../development_best_practices/">Best Practices</a>
+        
+    </li>
+
+        
     </ul>
 <li>
           
@@ -234,7 +241,7 @@
 <p>When an operator is partitioned, it is useful to aggregate the values of 
auto-metrics across all its partitions every window to get a logical view of 
these metrics. The application master performs these aggregations using metrics 
aggregators.</p>
 <p>The AutoMetric API helps to achieve this by providing an interface for 
writing aggregators- <code>AutoMetric.Aggregator</code>. Any implementation of 
<code>AutoMetric.Aggregator</code> can be set as an operator attribute - 
<code>METRICS_AGGREGATOR</code> for a particular operator which in turn is used 
for aggregating physical metrics.</p>
 <h2 id="default-aggregators">Default aggregators</h2>
-<p><a 
href="https://github.com/apache/incubator-apex-core/blob/master/common/src/main/java/com/datatorrent/common/metric/MetricsAggregator.java";><code>MetricsAggregator</code></a>
 is a simple implementation of <code>AutoMetric.Aggregator</code> that platform 
uses as a default for summing up primitive types - int, long, float and 
double.</p>
+<p><a 
href="https://github.com/apache/apex-core/blob/master/common/src/main/java/com/datatorrent/common/metric/MetricsAggregator.java";><code>MetricsAggregator</code></a>
 is a simple implementation of <code>AutoMetric.Aggregator</code> that platform 
uses as a default for summing up primitive types - int, long, float and 
double.</p>
 <p><code>MetricsAggregator</code> is just a collection of 
<code>SingleMetricAggregator</code>s. There are multiple implementations of 
<code>SingleMetricAggregator</code> that perform sum, min, max, avg which are 
present in Apex core and Apex malhar.</p>
 <p>For the <code>LineReceiver</code> operator, the application developer need 
not specify any aggregator. The platform will automatically inject an instance 
of <code>MetricsAggregator</code> that contains two 
<code>LongSumAggregator</code>s - one for <code>length</code> and one for 
<code>count</code>. This aggregator will report sum of length and sum of count 
across all the partitions of <code>LineReceiver</code>.</p>
 <h2 id="building-custom-aggregators">Building custom aggregators</h2>
@@ -358,7 +365,7 @@
   
     <div class="rst-footer-buttons" role="navigation" aria-label="footer 
navigation">
       
-        <a href="../apex_cli/" class="btn btn-neutral float-right" title="Apex 
CLI">Next <span class="icon icon-circle-arrow-right"></span></a>
+        <a href="../development_best_practices/" class="btn btn-neutral 
float-right" title="Best Practices">Next <span class="icon 
icon-circle-arrow-right"></span></a>
       
       
         <a href="../operator_development/" class="btn btn-neutral" 
title="Operators"><span class="icon icon-circle-arrow-left"></span> Previous</a>
@@ -390,7 +397,7 @@
         <span><a href="../operator_development/" style="color: 
#fcfcfc;">&laquo; Previous</a></span>
       
       
-        <span style="margin-left: 15px"><a href="../apex_cli/" style="color: 
#fcfcfc">Next &raquo;</a></span>
+        <span style="margin-left: 15px"><a 
href="../development_best_practices/" style="color: #fcfcfc">Next 
&raquo;</a></span>
       
     </span>
 </div>

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/compatibility/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/compatibility/index.html 
b/docs/apex-3.4/compatibility/index.html
index ee9fece..9c682ee 100644
--- a/docs/apex-3.4/compatibility/index.html
+++ b/docs/apex-3.4/compatibility/index.html
@@ -102,6 +102,13 @@
     </li>
 
         
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../development_best_practices/">Best Practices</a>
+        
+    </li>
+
+        
     </ul>
 <li>
           

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/development_best_practices/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/development_best_practices/index.html 
b/docs/apex-3.4/development_best_practices/index.html
new file mode 100644
index 0000000..c2a143f
--- /dev/null
+++ b/docs/apex-3.4/development_best_practices/index.html
@@ -0,0 +1,376 @@
+<!DOCTYPE html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  
+  
+  
+  <title>Best Practices - Apache Apex Documentation</title>
+  
+
+  <link rel="shortcut icon" href="../favicon.ico">
+  
+
+  
+  <link 
href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700'
 rel='stylesheet' type='text/css'>
+
+  <link rel="stylesheet" href="../css/theme.css" type="text/css" />
+  <link rel="stylesheet" href="../css/theme_extra.css" type="text/css" />
+  <link rel="stylesheet" href="../css/highlight.css">
+
+  
+  <script>
+    // Current page data
+    var mkdocs_page_name = "Best Practices";
+    var mkdocs_page_input_path = "development_best_practices.md";
+    var mkdocs_page_url = "/development_best_practices/";
+  </script>
+  
+  <script src="../js/jquery-2.1.1.min.js"></script>
+  <script src="../js/modernizr-2.8.3.min.js"></script>
+  <script type="text/javascript" src="../js/highlight.pack.js"></script>
+  <script src="../js/theme.js"></script> 
+
+  
+</head>
+
+<body class="wy-body-for-nav" role="document">
+
+  <div class="wy-grid-for-nav">
+
+    
+    <nav data-toggle="wy-nav-shift" class="wy-nav-side stickynav">
+      <div class="wy-side-nav-search">
+        <a href=".." class="icon icon-home"> Apache Apex Documentation</a>
+        <div role="search">
+  <form id ="rtd-search-form" class="wy-form" action="../search.html" 
method="get">
+    <input type="text" name="q" placeholder="Search docs" />
+  </form>
+</div>
+      </div>
+
+      <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" 
aria-label="main navigation">
+        <ul class="current">
+          
+            <li>
+    <li class="toctree-l1 ">
+        <a class="" href="..">Apache Apex</a>
+        
+    </li>
+<li>
+          
+            <li>
+    <ul class="subnav">
+    <li><span>Development</span></li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../apex_development_setup/">Development Setup</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../application_development/">Applications</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../application_packages/">Packages</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../operator_development/">Operators</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../autometrics/">AutoMetric API</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 current">
+        <a class="current" href="./">Best Practices</a>
+        
+            <ul>
+            
+                <li class="toctree-l3"><a 
href="#development-best-practices">Development Best Practices</a></li>
+                
+                    <li><a class="toctree-l4" 
href="#operators">Operators</a></li>
+                
+                    <li><a class="toctree-l4" href="#input-operators">Input 
Operators</a></li>
+                
+                    <li><a class="toctree-l4" href="#output-operators">Output 
Operators</a></li>
+                
+                    <li><a class="toctree-l4" 
href="#partitioning">Partitioning</a></li>
+                
+                    <li><a class="toctree-l4" href="#threads">Threads</a></li>
+                
+            
+            </ul>
+        
+    </li>
+
+        
+    </ul>
+<li>
+          
+            <li>
+    <ul class="subnav">
+    <li><span>Operations</span></li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../apex_cli/">Apex CLI</a>
+        
+    </li>
+
+        
+            
+    <li class="toctree-l1 ">
+        <a class="" href="../security/">Security</a>
+        
+    </li>
+
+        
+    </ul>
+<li>
+          
+            <li>
+    <li class="toctree-l1 ">
+        <a class="" href="../compatibility/">Compatibility</a>
+        
+    </li>
+<li>
+          
+        </ul>
+      </div>
+      &nbsp;
+    </nav>
+
+    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
+
+      
+      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
+        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
+        <a href="..">Apache Apex Documentation</a>
+      </nav>
+
+      
+      <div class="wy-nav-content">
+        <div class="rst-content">
+          <div role="navigation" aria-label="breadcrumbs navigation">
+  <ul class="wy-breadcrumbs">
+    <li><a href="..">Docs</a> &raquo;</li>
+    
+      
+        
+          <li>Development &raquo;</li>
+        
+      
+    
+    <li>Best Practices</li>
+    <li class="wy-breadcrumbs-aside">
+      
+    </li>
+  </ul>
+  <hr/>
+</div>
+          <div role="main">
+            <div class="section">
+              
+                <h1 id="development-best-practices">Development Best 
Practices</h1>
+<p>This document describes the best practices to follow when developing 
operators and other application components such as partitoners, stream codecs 
etc on the Apache Apex platform.</p>
+<h2 id="operators">Operators</h2>
+<p>These are general guidelines for all operators that are covered in the 
current section. The subsequent sections talk about special considerations for 
input and output operators.</p>
+<ul>
+<li>When writing a new operator to be used in an application, consider 
breaking it down into<ul>
+<li>An abstract operator that encompasses the core functionality but leaves 
application specific schemas and logic to the implementation.</li>
+<li>An optional concrete operator also in the library that extends the 
abstract operator and provides commonly used schema types such as strings, 
byte[] or POJOs.</li>
+</ul>
+</li>
+<li>Follow these conventions for the life cycle methods:<ul>
+<li>Do one time initialization of entities that apply for the entire lifetime 
of the operator in the <strong>setup</strong> method, e.g., factory 
initializations. Initializations in <strong>setup</strong> are done in the 
container where the operator is deployed. Allocating memory for fields in the 
constructor is not efficient as it would lead to extra garbage in memory for 
the following reason. The operator is instantiated on the client from where the 
application is launched, serialized and started one of the Hadoop nodes in a 
container. So the constructor is first called on the client and if it were to 
initialize any of the fields, that state would be saved during serialization. 
In the Hadoop container the operator is deserialized and started. This would 
invoke the constructor again, which will initialize the fields but their state 
will get overwritten by the serialized state and the initial values would 
become garbage in memory.</li>
+<li>Do one time initialization for live entities in <strong>activate</strong> 
method, e.g., opening connections to a database server or starting a thread for 
asynchronous operations. The <strong>activate</strong> method is called right 
before processing starts so it is a better place for these initializations than 
at <strong>setup</strong> which can lead to a delay before processing data from 
the live entity.  </li>
+<li>Perform periodic tasks based on processing time in application window 
boundaries.</li>
+<li>Perform initializations needed for each application window in 
<strong>beginWindow</strong>.</li>
+<li>Perform aggregations needed for each application window  in 
<strong>endWindow</strong>.</li>
+<li>Teardown of live entities (inverse of tasks performed during activate) 
should be in the <strong>deactivate</strong> method.</li>
+<li>Teardown of lifetime entities (those initialized in setup method) should 
happen in the <strong>teardown</strong> method.</li>
+<li>If the operator implementation is not finalized mark it with the 
<strong>@Evolving</strong> annotation.</li>
+</ul>
+</li>
+<li>If the operator needs to perform operations based on event time of the 
individual tuples and not the processing time, extend and use the 
<strong>WindowedOperator</strong>. Refer to documentation of that operator for 
details on how to use it.</li>
+<li>If an operator needs to do some work when it is not receiving any input, 
it should implement <strong>IdleTimeHandler</strong> interface. This interface 
contains <strong>handleIdleTime</strong> method which will be called whenever 
the platform isn’t doing anything else and the operator can do the work in 
this method. If for any reason the operator does not have any work to do when 
this method is called, it should sleep for a small amount of time such as that 
specified by the <strong>SPIN_MILLIS</strong> attribute so that it does not 
cause a busy wait when called repeatedly by the platform. Also, the method 
should not block and return in a reasonable amount of time that is less than 
the streaming window size (which is 500ms by default).</li>
+<li>Often operators have customizable parameters such as information about 
locations of external systems or parameters that modify the behavior of the 
operator. Users should be able to specify these easily without having to change 
source code. This can be done by making them properties of the operator because 
they can then be initialized from external properties files.<ul>
+<li>Where possible default values should be provided for the properties in the 
source code.</li>
+<li>Validation rules should be specified for the properties using javax 
constraint validations that check whether the values specified for the 
properties are in the correct format, range or other operator requirements. 
Required properties should have at least a <strong>@NotNull</strong> validation 
specifying that they have to be specified by the user.</li>
+</ul>
+</li>
+</ul>
+<h3 id="checkpointing">Checkpointing</h3>
+<p>Checkpointing is a process of snapshotting the state of an operator and 
saving it so that in case of failure the state can be used to restore the 
operator to a prior state and continue processing. It is automatically 
performed by the platform at a configurable interval. All operators in the 
application are checkpointed in a distributed fashion, thus allowing the entire 
state of the application to be saved and available for recovery if needed. Here 
are some things to remember when it comes to checkpointing:</p>
+<ul>
+<li>The process of checkpointing involves snapshotting the state by 
serializing the operator and saving it to a store. This is done using a 
<strong>StorageAgent</strong>. By default a <em>StorageAgent</em> is already 
provided by the platform and it is called <strong>AsyncFSStorageAgent</strong>. 
It serializes the operator using Kryo and saves the serialized state 
asynchronously to a filesystem such as HDFS. There are other implementations of 
<em>StorageAgent</em> available such as 
<strong>GeodeKeyValueStorageAgent</strong> that stores the serialized state in 
Geode which is an in-memory replicated data grid.</li>
+<li>All variables in the operator marked neither transient nor final are saved 
so any variables in the operator that are not part of the state should be 
marked transient. Specifically any variables like connection objects, i/o 
streams, ports are transient, because they need to be setup again on failure 
recovery.</li>
+<li>If the operator does not keep any state between windows, mark it with the 
<strong>@Stateless</strong> annotation. This results in efficiencies during 
checkpointing and recovery. The operator will not be checkpointed and is always 
restored to the initial state</li>
+<li>The checkpoint interval can be set using the 
<strong>CHECKPOINT_WINDOW_COUNT</strong> attribute which specifies the interval 
in terms of number of streaming windows.</li>
+<li>If the correct functioning of the operator requires the 
<strong>endWindow</strong> method be called before checkpointing can happen, 
then the checkpoint interval should align with application window interval 
i.e., it should be a multiple of application window interval. In this case the 
operator should be marked with <strong>OperatorAnnotation</strong> and 
<strong>checkpointableWithinAppWindow</strong> set to false. If the window 
intervals are configured by the user and they don’t align, it will result in 
a DAG validation error and application won’t launch.</li>
+<li>In some cases the operator state related to a piece of data needs to be 
purged once that data is no longer required by the application, otherwise the 
state will continue to build up indefinitely. The platform provides a way to 
let the operator know about this using a callback listener called 
<strong>CheckpointNotificationListener</strong>. This listener has a callback 
method called <strong>committed</strong>, which is called by the platform from 
time to time with a window id that has been processed successfully by all the 
operators in the DAG and hence is no longer needed. The operator can delete all 
the state corresponding to window ids less than or equal to the provided window 
id.</li>
+<li>Sometimes operators need to perform some tasks just before checkpointing. 
For example, filesystem operators may want to flush the files just before 
checkpoint so they can be sure that all pending data is written to disk and no 
data is lost if there is an operator failure just after the checkpoint and the 
operator restarts from the checkpoint. To do this the operator would implement 
the same <em>CheckpointNotificationListener</em> interface and implement the 
<strong>beforeCheckpoint</strong> method where it can do these tasks.</li>
+<li>If the operator is going to have a large state, checkpointing the entire 
state each time becomes unviable. Furthermore, the amount of memory needed to 
hold the state could be larger than the amount of physical memory available. In 
these cases the operator should checkpoint the state incrementally and also 
manage the memory for the state more efficiently. The platform provides a 
utiltiy called <strong>ManagedState</strong> that uses a combination of in 
memory and disk cache to efficiently store and retrieve data in a performant, 
fault tolerant way and also checkpoint it in an incremental fashion. There are 
operators in the platform that use <em>ManagedState</em> and can be used as a 
reference on how to use this utility such as Dedup or Join operators.</li>
+</ul>
+<h2 id="input-operators">Input Operators</h2>
+<p>Input operators have additional requirements:</p>
+<ul>
+<li>The <strong>emitTuples</strong> method implemented by the operator, is 
called by the platform, to give the operator an opportunity to emit some data. 
This method is always called within a window boundary but can be called 
multiple times within the same window. There are some important guidelines on 
how to implement this method:<ul>
+<li>This should not be a blocking method and should return in a reasonable 
time that is less than the streaming window size (which is 500ms by default). 
This also applies to other callback methods called by the platform such as 
<em>beginWindow</em>, <em>endWindow</em> etc., but is more important here since 
this method will be called continuously by the platform.</li>
+<li>If the operator needs to interact with external systems to obtain data and 
this can potentially take a long time, then this should be performed 
asynchronously in a different thread. Refer to the threading section below for 
the guidelines when using threading.</li>
+<li>In each invocation, the method can emit any number of data tuples.</li>
+</ul>
+</li>
+</ul>
+<h3 id="idempotence">Idempotence</h3>
+<p>Many applications write data to external systems using output operators. To 
ensure that data is present exactly once in the external system even in a 
failure recovery scenario, the output operators expect the replayed windows 
during recovery contain the same data as before the failure. This is called 
idempotency. Since operators within the DAG are merely responding to input data 
provided to them by the upstream operators and the input operator has no 
upstream operator, the responsibility of idempotent replay falls on the input 
operators.</p>
+<ul>
+<li>For idempotent replay of data, the operator needs to store some 
meta-information for every window that would allow it to identify what data was 
sent in that window. This is called the idempotent state.<ul>
+<li>If the external source of the input operator allows replayability, this 
could be information such as offset of last piece of data in the window, an 
identifier of the last piece of data itself or number of data tuples sent.</li>
+<li>However if the external source does not allow replayability from an 
operator specified point, then the entire data sent within the window may need 
to be persisted by the operator.</li>
+</ul>
+</li>
+<li>The platform provides a utility called <em>WindowDataManager</em> to allow 
operators to save and retrieve idempotent state every window. Operators should 
use this to implement idempotency.</li>
+</ul>
+<h2 id="output-operators">Output Operators</h2>
+<p>Output operators typically connect to external storage systems such as 
filesystems, databases or key value stores to store data.</p>
+<ul>
+<li>In some situations, the external systems may not be functioning in a 
reliable fashion. They may be having prolonged outages or performance problems. 
If the operator is being designed to work in such environments, it needs to be 
able to handle these problems gracefully and not block the DAG or fail. In 
these scenarios the operator should cache the data into a local store such as 
HDFS and interact with external systems in a separate thread so as to not have 
problems in the operator lifecycle thread. This pattern is called the 
<strong>Reconciler</strong> pattern and there are operators that implement this 
pattern available in the library for reference.</li>
+</ul>
+<h3 id="end-to-end-exactly-once">End-to-End Exactly Once</h3>
+<p>When output operators store data in external systems, it is important that 
they do not lose data or write duplicate data when there is a failure event and 
the DAG recovers from that failure. In failure recovery, the windows from the 
previous checkpoint are replayed and the operator receives this data again. The 
operator should ensure that it does not write this data again. Operator 
developers should figure out how to do this specifically for the operators they 
are developing depending on the logic of the operators. Below are examples of 
how a couple of existing output operators do this for reference.</p>
+<ul>
+<li>File output operator that writes data to files keeps track of the file 
lengths in the state. These lengths are checkpointed and restored on failure 
recovery. On restart, the operator truncates the file to the length equal to 
the length in the recovered state. This makes the data in the file same as it 
was at the time of checkpoint before the failure. The operator now writes the 
replayed data from the checkpoint in regular fashion as any other data. This 
ensures no data is lost or duplicated in the file.</li>
+<li>The JDBC output operator that writes data to a database table writes the 
data in a window in a single transaction. It also writes the current window id 
into a meta table along with the data as part of the same transaction. It 
commits the transaction at the end of the window. When there is an operator 
failure before the final commit, the state of the database is that it contains 
the data from the previous fully processed window and its window id since the 
current window transaction isn’t yet committed. On recovery, the operator 
reads this window id back from the meta table. It ignores all the replayed 
windows whose window id is less than or equal to the recovered window id and 
thus ensures that it does not duplicate data already present in the database. 
It starts writing data normally again when window id of data becomes greater 
than recovered window thus ensuring no data is lost.</li>
+</ul>
+<h2 id="partitioning">Partitioning</h2>
+<p>Partitioning allows an operation to be scaled to handle more pieces of data 
than before but with a similar SLA. This is done by creating multiple instances 
of an operator and distributing the data among them. Input operators can also 
be partitioned to stream more pieces of data into the application. The platform 
provides a lot of flexibility and options for partitioning. Partitioning can 
happen once at startup or can be dynamically changed anytime while the 
application is running, and it can be done in a stateless or stateful way by 
distributing state from the old partitions to new partitions.</p>
+<p>In the platform, the responsibility for partitioning is shared among 
different entities. These are:</p>
+<ol>
+<li>A <strong>partitioner</strong> that specifies <em>how</em> to partition 
the operator, specifically it takes an old set of partitions and creates a new 
set of partitions. At the start of the application the old set has one 
partition and the partitioner can return more than one partition to start the 
application with multiple partitions. The partitioner can have any custom JAVA 
logic to determine the number of new partitions, set their initial state as a 
brand new state or derive it from the state of the old partitions. It also 
specifies how the data gets distributed among the new partitions. The new set 
doesn't have to contain only new partitions, it can carry over some old 
partitions if desired.</li>
+<li>An optional <strong>statistics (stats) listener</strong> that specifies 
<em>when</em> to partition. The reason it is optional is that it is needed only 
when dynamic partitioning is needed. With the stats listener, the stats can be 
used to determine when to partition.</li>
+<li>In some cases the <em>operator</em> itself should be aware of partitioning 
and would need to provide supporting code.<ul>
+<li>In case of input operators each partition should have a property or a set 
of properties that allow it to distinguish itself from the other partitions and 
fetch unique data.</li>
+</ul>
+</li>
+<li>When an operator that was originally a single instance is split into 
multiple partitions with each partition working on a subset of data, the 
results of the partitions may need to be combined together to compute the final 
result. The combining logic would depend on the logic of the operator. This 
would be specified by the developer using a <strong>Unifier</strong>, which is 
deployed as another operator by the platform. If no <em>Unifier</em> is 
specified, the platform inserts a <strong>default unifier</strong> that merges 
the results of the multiple partition streams into a single stream. Each output 
port can have a different <em>Unifier</em> and this is specified by returning 
the corresponding <em>Unifier</em> in the <strong>getUnifier</strong> method of 
the output port. The operator developer should provide a custom 
<em>Unifier</em> wherever applicable.</li>
+<li>The Apex <em>engine</em> that brings everything together and effects the 
partitioning.</li>
+</ol>
+<p>Since partitioning is critical for scalability of applications, operators 
must support it. There should be a strong reason for an operator to not support 
partitioning, such as, the logic performed by the operator not lending itself 
to parallelism. In order to support partitioning, an operator developer, apart 
from developing the functionality of the operator, may also need to provide a 
partitioner, stats listener and supporting code in the operator as described in 
the steps above. The next sections delve into this. </p>
+<h3 id="out-of-the-box-partitioning">Out of the box partitioning</h3>
+<p>The platform comes with some built-in partitioning utilities that can be 
used in certain scenarios.</p>
+<ul>
+<li>
+<p><strong>StatelessPartitioner</strong> provides a default partitioner, that 
can be used for an operator in certain conditions. If the operator satisfies 
these conditions, the partitioner can be specified for the operator with a 
simple setting and no other partitioning code is needed. The conditions are:</p>
+<ul>
+<li>No dynamic partitioning is needed, see next point about dynamic 
partitioning. </li>
+<li>There is no distinct initial state for the partitions, i.e., all 
partitions start with the same initial state submitted during application 
launch.</li>
+</ul>
+<p>Typically input or output operators do not fall into this category, 
although there are some exceptions. This partitioner is mainly used with 
operators that are in the middle of the DAG, after the input and before the 
output operators. When used with non-input operators, only the data for the 
first declared input port is distributed among the different partitions. All 
other input ports are treated as broadcast and all partitions receive all the 
data for that port.</p>
+</li>
+<li>
+<p><strong>StatelessThroughputBasedPartitioner</strong> in Malhar provides a 
dynamic partitioner based on throughput thresholds. Similarly 
<strong>StatelessLatencyBasedPartitioner</strong> provides a latency based 
dynamic partitioner in RTS. If these partitioners can be used, then separate 
partitioning related code is not needed. The conditions under which these can 
be used are:</p>
+<ul>
+<li>There is no distinct initial state for the partitions.</li>
+<li>There is no state being carried over by the operator from one window to 
the next i.e., operator is stateless.</li>
+</ul>
+</li>
+</ul>
+<h3 id="custom-partitioning">Custom partitioning</h3>
+<p>In many cases, operators don’t satisfy the above conditions and a 
built-in partitioner cannot be used. Custom partitioning code needs to be 
written by the operator developer. Below are guidelines for it.</p>
+<ul>
+<li>Since the operator developer is providing a <em>partitioner</em> for the 
operator, the partitioning code should be added to the operator itself by 
making the operator implement the Partitioner interface and implementing the 
required methods, rather than creating a separate partitioner. The advantage is 
the user of the operator does not have to explicitly figure out the partitioner 
and set it for the operator but still has the option to override this built-in 
partitioner with a different one.</li>
+<li>The <em>partitioner</em> is responsible for setting the initial state of 
the new partitions, whether it is at the start of the application or when 
partitioning is happening while the application is running as in the dynamic 
partitioning case. In the dynamic partitioning scenario, the partitioner needs 
to take the state from the old partitions and distribute it among the new 
partitions. It is important to note that apart from the checkpointed state the 
partitioner also needs to distribute idempotent state.</li>
+<li>The <em>partitioner</em> interface has two methods, 
<strong>definePartitions</strong> and <strong>partitioned</strong>. The method 
<em>definePartitions</em> is first called to determine the new partitions, and 
if enough resources are available on the cluster, the <em>partitioned</em> 
method is called passing in the new partitions. This happens both during 
initial partitioning and dynamic partitioning. If resources are not available, 
partitioning is abandoned and existing partitions continue to run untouched. 
This means that any processing intensive operations should be deferred to the 
<em>partitioned</em> call instead of doing them in <em>definePartitions</em>, 
as they may not be needed if there are not enough resources available in the 
cluster.</li>
+<li>The <em>partitioner</em>, along with creating the new partitions, should 
also specify how the data gets distributed across the new partitions. It should 
do this by specifying a mapping called <strong>PartitionKeys</strong> for each 
partition that maps the data to that partition. This mapping needs to be 
specified for every input port in the operator. If the <em>partitioner</em> 
wants to use the standard mapping it can use a utility method called 
<strong>DefaultPartition.assignPartitionKeys</strong>.</li>
+<li>When the partitioner is scaling the operator up to more partitions, try to 
reuse the existing partitions and create new partitions to augment the current 
set. The reuse can be achieved by the partitioner returning the current 
partitions unchanged. This will result in the current partitions continuing to 
run untouched.</li>
+<li>In case of dynamic partitioning, as mentioned earlier, a stats listener is 
also needed to determine when to re-partition. Like the <em>Partitioner</em> 
interface, the operator can also implement the <em>StatsListener</em> interface 
to provide a stats listener implementation that will be automatically used.</li>
+<li>The <em>StatsListener</em> has access to all operator statistics to make 
its decision on partitioning. Apart from the statistics that the platform 
computes for the operators such as throughput, latency etc, operator developers 
can include their own business metrics by using the AutoMetric feature.</li>
+<li>If the operator is not partitionable, mark it so with 
<em>OperatorAnnotation</em> and <em>partitionable</em> element set to 
false.</li>
+</ul>
+<h3 id="streamcodecs">StreamCodecs</h3>
+<p>A <strong>StreamCodec</strong> is used in partitioning to distribute the 
data tuples among the partitions. The <em>StreamCodec</em> computes an integer 
hashcode for a data tuple and this is used along with <em>PartitionKeys</em> 
mapping to determine which partition or partitions receive the data tuple. If a 
<em>StreamCodec</em> is not specified, then a default one is used by the 
platform which returns the JAVA hashcode of the tuple. </p>
+<p><em>StreamCodec</em> is also useful in another aspect of the application. 
It is used to serialize and deserialize the tuple to transfer it between 
operators. The default <em>StreamCodec</em> uses Kryo library for 
serialization. </p>
+<p>The following guidelines are useful when considering a custom 
<em>StreamCodec</em></p>
+<ul>
+<li>A custom <em>StreamCodec</em> is needed if the tuples need to be 
distributed based on a criteria different from the hashcode of the tuple. If 
the correct working of an operator depends on the data from the upstream 
operator being distributed using a custom criteria such as being sticky on a 
“key” field within the tuple, then a custom <em>StreamCodec</em> should be 
provided by the operator developer. This codec can implement the custom 
criteria. The operator should also return this custom codec in the 
<strong>getStreamCodec</strong> method of the input port.</li>
+<li>When implementing a custom <em>StreamCodec</em> for the purpose of using a 
different criteria to distribute the tuples, the codec can extend an existing 
<em>StreamCodec</em> and implement the hashcode method, so that the codec does 
not have to worry about the serialization and deserialization functionality. 
The Apex platform provides two pre-built <em>StreamCodec</em> implementations 
for this purpose, one is <strong>KryoSerializableStreamCodec</strong> that uses 
Kryo for serialization and another one 
<strong>JavaSerializationStreamCodec</strong> that uses JAVA serialization.</li>
+<li>Different <em>StreamCodec</em> implementations can be used for different 
inputs in a stream with multiple inputs when different criteria of distributing 
the tuples is desired between the multiple inputs. </li>
+</ul>
+<h2 id="threads">Threads</h2>
+<p>The operator lifecycle methods such as <strong>setup</strong>, 
<strong>beginWindow</strong>, <strong>endWindow</strong>, 
<strong>process</strong> in <em>InputPorts</em> are all called from a single 
operator lifecycle thread, by the platform, unbeknownst to the user. So the 
user does not have to worry about dealing with the issues arising from 
multi-threaded code. Use of separate threads in an operator is discouraged 
because in most cases the motivation for this is parallelism, but parallelism 
can already be achieved by using multiple partitions and furthermore mistakes 
can be made easily when writing multi-threaded code. When dealing with high 
volume and velocity data, the corner cases with incorrectly written 
multi-threaded code are encountered more easily and exposed. However, there are 
times when separate threads are needed, for example, when interacting with 
external systems the delay in retrieving or sending data can be large at times, 
blocking the operator and other DAG pro
 cessing such as committed windows. In these cases the following guidelines 
must be followed strictly.</p>
+<ul>
+<li>Threads should be started in <strong>activate</strong> and stopped in 
<strong>deactivate</strong>. In <em>deactivate</em> the operator should wait 
till any threads it launched, have finished execution. It can do so by calling 
<strong>join</strong> on the threads or if using 
<strong>ExecutorService</strong>, calling <strong>awaitTermination</strong> on 
the service.</li>
+<li>Threads should not call any methods on the ports directly as this can 
cause concurrency exceptions and also result in invalid states.</li>
+<li>Threads can share state with the lifecycle methods using data structures 
that are either explicitly protected by synchronization or are inherently 
thread safe such as thread safe queues.</li>
+<li>If this shared state needs to be protected against failure then it needs 
to be persisted during checkpoint. To have a consistent checkpoint, the state 
should not be modified by the thread when it is being serialized and saved by 
the operator lifecycle thread during checkpoint. Since the checkpoint process 
happens outside the window boundary the thread should be quiesced between 
<strong>endWindow</strong> and <strong>beginWindow</strong> or more efficiently 
between pre-checkpoint and checkpointed callbacks.</li>
+</ul>
+              
+            </div>
+          </div>
+          <footer>
+  
+    <div class="rst-footer-buttons" role="navigation" aria-label="footer 
navigation">
+      
+        <a href="../apex_cli/" class="btn btn-neutral float-right" title="Apex 
CLI">Next <span class="icon icon-circle-arrow-right"></span></a>
+      
+      
+        <a href="../autometrics/" class="btn btn-neutral" title="AutoMetric 
API"><span class="icon icon-circle-arrow-left"></span> Previous</a>
+      
+    </div>
+  
+
+  <hr/>
+
+  <div role="contentinfo">
+    <!-- Copyright etc -->
+    
+  </div>
+
+  Built with <a href="http://www.mkdocs.org";>MkDocs</a> using a <a 
href="https://github.com/snide/sphinx_rtd_theme";>theme</a> provided by <a 
href="https://readthedocs.org";>Read the Docs</a>.
+</footer>
+         
+        </div>
+      </div>
+
+    </section>
+
+  </div>
+
+<div class="rst-versions" role="note" style="cursor: pointer">
+    <span class="rst-current-version" data-toggle="rst-current-version">
+      
+      
+        <span><a href="../autometrics/" style="color: #fcfcfc;">&laquo; 
Previous</a></span>
+      
+      
+        <span style="margin-left: 15px"><a href="../apex_cli/" style="color: 
#fcfcfc">Next &raquo;</a></span>
+      
+    </span>
+</div>
+
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/images/security/image03.png
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/images/security/image03.png 
b/docs/apex-3.4/images/security/image03.png
new file mode 100755
index 0000000..175feb8
Binary files /dev/null and b/docs/apex-3.4/images/security/image03.png differ

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/index.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/index.html b/docs/apex-3.4/index.html
index c944ecc..56fcb03 100644
--- a/docs/apex-3.4/index.html
+++ b/docs/apex-3.4/index.html
@@ -109,6 +109,13 @@
     </li>
 
         
+            
+    <li class="toctree-l1 ">
+        <a class="" href="development_best_practices/">Best Practices</a>
+        
+    </li>
+
+        
     </ul>
 <li>
           
@@ -184,7 +191,7 @@
 <li>Simple API supports generic Java code</li>
 </ul>
 <p>Platform has been demonstated to scale linearly across Hadoop clusters 
under extreme loads of billions of events per second.  Hardware and process 
failures are quickly recovered with HDFS-backed checkpointing and automatic 
operator recovery, preserving application state and resuming execution in 
seconds.  Functional and operational specifications are separated.  Apex 
provides a simple API, which enables users to write generic, reusable code.  
The code is dropped in as-is and platform automatically handles the various 
operational concerns, such as state management, fault tolerance, scalability, 
security, metrics, etc.  This frees users to focus on functional development, 
and lets platform provide operability support.</p>
-<p>The core Apex platform is supplemented by Malhar, a library of connector 
and logic functions, enabling rapid application development.  These operators 
and modules provide access to HDFS, S3, NFS, FTP, and other file systems; 
Kafka, ActiveMQ, RabbitMQ, JMS, and other message systems; MySql, Cassandra, 
MongoDB, Redis, HBase, CouchDB, generic JDBC, and other database connectors.  
In addition to the operators, the library contains a number of demos 
applications, demonstrating operator features and capabilities.  To see the 
full list of available operators and related documentation, visit <a 
href="https://github.com/apache/incubator-apex-malhar";>Apex Malhar on 
Github</a></p>
+<p>The core Apex platform is supplemented by Malhar, a library of connector 
and logic functions, enabling rapid application development.  These operators 
and modules provide access to HDFS, S3, NFS, FTP, and other file systems; 
Kafka, ActiveMQ, RabbitMQ, JMS, and other message systems; MySql, Cassandra, 
MongoDB, Redis, HBase, CouchDB, generic JDBC, and other database connectors.  
In addition to the operators, the library contains a number of demos 
applications, demonstrating operator features and capabilities.  To see the 
full list of available operators and related documentation, visit <a 
href="https://github.com/apache/apex-malhar";>Apex Malhar on Github</a></p>
 <p>For additional information visit <a href="http://apex.apache.org/";>Apache 
Apex</a>.</p>
 <p><a href="http://apex.apache.org/";><img alt="" src="./favicon.ico" /></a></p>
               
@@ -232,5 +239,5 @@
 
 <!--
 MkDocs version : 0.15.3
-Build Date UTC : 2016-05-13 22:25:11.258707
+Build Date UTC : 2016-09-07 01:53:39.631895
 -->

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/license/highlight.js/LICENSE
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/license/highlight.js/LICENSE 
b/docs/apex-3.4/license/highlight.js/LICENSE
deleted file mode 100644
index 422deb7..0000000
--- a/docs/apex-3.4/license/highlight.js/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2006, Ivan Sagalaev
-All rights reserved.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of highlight.js nor the names of its contributors 
-      may be used to endorse or promote products derived from this software 
-      without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/main.html
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/main.html b/docs/apex-3.4/main.html
new file mode 100644
index 0000000..79c9f4e
--- /dev/null
+++ b/docs/apex-3.4/main.html
@@ -0,0 +1,10 @@
+{% extends "base.html" %}
+
+{#
+The entry point for the ReadTheDocs Theme.
+ 
+Any theme customisations should override this file to redefine blocks defined 
in
+the various templates. The custom theme should only need to define a main.html
+which `{% extends "base.html" %}` and defines various blocks which will replace
+the blocks defined in base.html and its included child templates.
+#}

http://git-wip-us.apache.org/repos/asf/apex-site/blob/21e76a00/docs/apex-3.4/mkdocs/js/lunr-0.5.7.min.js
----------------------------------------------------------------------
diff --git a/docs/apex-3.4/mkdocs/js/lunr-0.5.7.min.js 
b/docs/apex-3.4/mkdocs/js/lunr-0.5.7.min.js
new file mode 100644
index 0000000..b72449a
--- /dev/null
+++ b/docs/apex-3.4/mkdocs/js/lunr-0.5.7.min.js
@@ -0,0 +1,7 @@
+/**
+ * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as 
bright - 0.5.7
+ * Copyright (C) 2014 Oliver Nightingale
+ * MIT Licensed
+ * @license
+ */
+!function(){var t=function(e){var n=new t.Index;return 
n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.5.7",t.utils={},t.utils.warn=function(t){return
 
function(e){t.console&&console.warn&&console.warn(e)}}(this),t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var
 t=Array.prototype.slice.call(arguments),e=t.pop(),n=t;if("function"!=typeof 
e)throw new TypeError("last argument must be a 
function");n.forEach(function(t){this.hasHandler(t)||(this.events[t]=[]),this.events[t].push(e)},this)},t.EventEmitter.prototype.removeListener=function(t,e){if(this.hasHandler(t)){var
 
n=this.events[t].indexOf(e);this.events[t].splice(n,1),this.events[t].length||delete
 
this.events[t]}},t.EventEmitter.prototype.emit=function(t){if(this.hasHandler(t)){var
 
e=Array.prototype.slice.call(arguments,1);this.events[t].forEach(function(t){t.apply(void
 0,e)})}},t.EventEmitter.prototype.hasHandler=function(t){return t in 
this.events},t.to
 kenizer=function(t){if(!arguments.length||null==t||void 
0==t)return[];if(Array.isArray(t))return t.map(function(t){return 
t.toLowerCase()});for(var 
e=t.toString().replace(/^\s+/,""),n=e.length-1;n>=0;n--)if(/\S/.test(e.charAt(n))){e=e.substring(0,n+1);break}return
 e.split(/(?:\s+|\-)/).filter(function(t){return!!t}).map(function(t){return 
t.toLowerCase()})},t.Pipeline=function(){this._stack=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n
 in this.registeredFunctions&&t.utils.warn("Overwriting existing registered 
function: 
"+n),e.label=n,t.Pipeline.registeredFunctions[e.label]=e},t.Pipeline.warnIfFunctionNotRegistered=function(e){var
 n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is 
not registered with pipeline. This may cause problems when serialising the 
index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return 
e.forEach(function(e){var i=t.Pipeline.registeredFunctions[e];if(!i)throw new 
Error("Cannot load un-re
 gistered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var 
e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._stack.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var
 
i=this._stack.indexOf(e)+1;this._stack.splice(i,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var
 
i=this._stack.indexOf(e);this._stack.splice(i,0,n)},t.Pipeline.prototype.remove=function(t){var
 
e=this._stack.indexOf(t);this._stack.splice(e,1)},t.Pipeline.prototype.run=function(t){for(var
 e=[],n=t.length,i=this._stack.length,o=0;n>o;o++){for(var 
r=t[o],s=0;i>s&&(r=this._stack[s](r,o,t),void 0!==r);s++);void 
0!==r&&e.push(r)}return 
e},t.Pipeline.prototype.reset=function(){this._stack=[]},t.Pipeline.prototype.toJSON=function(){return
 this._stack.map(function(e){return 
t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Vector=function(){this._magni
 tude=null,this.list=void 
0,this.length=0},t.Vector.Node=function(t,e,n){this.idx=t,this.val=e,this.next=n},t.Vector.prototype.insert=function(e,n){var
 i=this.list;if(!i)return this.list=new 
t.Vector.Node(e,n,i),this.length++;for(var o=i,r=i.next;void 
0!=r;){if(e<r.idx)return o.next=new 
t.Vector.Node(e,n,r),this.length++;o=r,r=r.next}return o.next=new 
t.Vector.Node(e,n,r),this.length++},t.Vector.prototype.magnitude=function(){if(this._magniture)return
 this._magnitude;for(var t,e=this.list,n=0;e;)t=e.val,n+=t*t,e=e.next;return 
this._magnitude=Math.sqrt(n)},t.Vector.prototype.dot=function(t){for(var 
e=this.list,n=t.list,i=0;e&&n;)e.idx<n.idx?e=e.next:e.idx>n.idx?n=n.next:(i+=e.val*n.val,e=e.next,n=n.next);return
 i},t.Vector.prototype.similarity=function(t){return 
this.dot(t)/(this.magnitude()*t.magnitude())},t.SortedSet=function(){this.length=0,this.elements=[]},t.SortedSet.load=function(t){var
 e=new this;return 
e.elements=t,e.length=t.length,e},t.SortedSet.prototype.add=function(){Arr
 
ay.prototype.slice.call(arguments).forEach(function(t){~this.indexOf(t)||this.elements.splice(this.locationFor(t),0,t)},this),this.length=this.elements.length},t.SortedSet.prototype.toArray=function(){return
 this.elements.slice()},t.SortedSet.prototype.map=function(t,e){return 
this.elements.map(t,e)},t.SortedSet.prototype.forEach=function(t,e){return 
this.elements.forEach(t,e)},t.SortedSet.prototype.indexOf=function(t,e,n){var 
e=e||0,n=n||this.elements.length,i=n-e,o=e+Math.floor(i/2),r=this.elements[o];return
 1>=i?r===t?o:-1:t>r?this.indexOf(t,o,n):r>t?this.indexOf(t,e,o):r===t?o:void 
0},t.SortedSet.prototype.locationFor=function(t,e,n){var 
e=e||0,n=n||this.elements.length,i=n-e,o=e+Math.floor(i/2),r=this.elements[o];if(1>=i){if(r>t)return
 o;if(t>r)return o+1}return 
t>r?this.locationFor(t,o,n):r>t?this.locationFor(t,e,o):void 
0},t.SortedSet.prototype.intersect=function(e){for(var n=new 
t.SortedSet,i=0,o=0,r=this.length,s=e.length,a=this.elements,h=e.elements;;){if(i>r-1||o>s-1)brea
 k;a[i]!==h[o]?a[i]<h[o]?i++:a[i]>h[o]&&o++:(n.add(a[i]),i++,o++)}return 
n},t.SortedSet.prototype.clone=function(){var e=new t.SortedSet;return 
e.elements=this.toArray(),e.length=e.elements.length,e},t.SortedSet.prototype.union=function(t){var
 e,n,i;return 
this.length>=t.length?(e=this,n=t):(e=t,n=this),i=e.clone(),i.add.apply(i,n.toArray()),i},t.SortedSet.prototype.toJSON=function(){return
 
this.toArray()},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new
 t.Pipeline,this.documentStore=new t.Store,this.tokenStore=new 
t.TokenStore,this.corpusTokens=new t.SortedSet,this.eventEmitter=new 
t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var
 t=Array.prototype.slice.call(arguments);return 
this.eventEmitter.addListener.apply(this.eventEmitter,t)},t.Index.prototype.off=function(t,e){return
 
this.eventEmitter.removeListener(t,e)},t.Index.load=function(e){e.version!==t.version&&t.utils.wa
 rn("version mismatch: current "+t.version+" importing "+e.version);var n=new 
this;return 
n._fields=e.fields,n._ref=e.ref,n.documentStore=t.Store.load(e.documentStore),n.tokenStore=t.TokenStore.load(e.tokenStore),n.corpusTokens=t.SortedSet.load(e.corpusTokens),n.pipeline=t.Pipeline.load(e.pipeline),n},t.Index.prototype.field=function(t,e){var
 e=e||{},n={name:t,boost:e.boost||1};return 
this._fields.push(n),this},t.Index.prototype.ref=function(t){return 
this._ref=t,this},t.Index.prototype.add=function(e,n){var i={},o=new 
t.SortedSet,r=e[this._ref],n=void 
0===n?!0:n;this._fields.forEach(function(n){var 
r=this.pipeline.run(t.tokenizer(e[n.name]));i[n.name]=r,t.SortedSet.prototype.add.apply(o,r)},this),this.documentStore.set(r,o),t.SortedSet.prototype.add.apply(this.corpusTokens,o.toArray());for(var
 s=0;s<o.length;s++){var 
a=o.elements[s],h=this._fields.reduce(function(t,e){var 
n=i[e.name].length;if(!n)return t;var o=i[e.name].filter(function(t){return 
t===a}).length;return t+o/n*e.boost}
 
,0);this.tokenStore.add(a,{ref:r,tf:h})}n&&this.eventEmitter.emit("add",e,this)},t.Index.prototype.remove=function(t,e){var
 n=t[this._ref],e=void 0===e?!0:e;if(this.documentStore.has(n)){var 
i=this.documentStore.get(n);this.documentStore.remove(n),i.forEach(function(t){this.tokenStore.remove(t,n)},this),e&&this.eventEmitter.emit("remove",t,this)}},t.Index.prototype.update=function(t,e){var
 e=void 
0===e?!0:e;this.remove(t,!1),this.add(t,!1),e&&this.eventEmitter.emit("update",t,this)},t.Index.prototype.idf=function(t){var
 e="@"+t;if(Object.prototype.hasOwnProperty.call(this._idfCache,e))return 
this._idfCache[e];var n=this.tokenStore.count(t),i=1;return 
n>0&&(i=1+Math.log(this.tokenStore.length/n)),this._idfCache[e]=i},t.Index.prototype.search=function(e){var
 n=this.pipeline.run(t.tokenizer(e)),i=new 
t.Vector,o=[],r=this._fields.reduce(function(t,e){return 
t+e.boost},0),s=n.some(function(t){return 
this.tokenStore.has(t)},this);if(!s)return[];n.forEach(function(e,n,s){var 
a=1/s.length*t
 
his._fields.length*r,h=this,u=this.tokenStore.expand(e).reduce(function(n,o){var
 r=h.corpusTokens.indexOf(o),s=h.idf(o),u=1,l=new t.SortedSet;if(o!==e){var 
c=Math.max(3,o.length-e.length);u=1/Math.log(c)}return 
r>-1&&i.insert(r,a*s*u),Object.keys(h.tokenStore.get(o)).forEach(function(t){l.add(t)}),n.union(l)},new
 t.SortedSet);o.push(u)},this);var a=o.reduce(function(t,e){return 
t.intersect(e)});return 
a.map(function(t){return{ref:t,score:i.similarity(this.documentVector(t))}},this).sort(function(t,e){return
 e.score-t.score})},t.Index.prototype.documentVector=function(e){for(var 
n=this.documentStore.get(e),i=n.length,o=new t.Vector,r=0;i>r;r++){var 
s=n.elements[r],a=this.tokenStore.get(s)[e].tf,h=this.idf(s);o.insert(this.corpusTokens.indexOf(s),a*h)}return
 
o},t.Index.prototype.toJSON=function(){return{version:t.version,fields:this._fields,ref:this._ref,documentStore:this.documentStore.toJSON(),tokenStore:this.tokenStore.toJSON(),corpusTokens:this.corpusTokens.toJSON(),pipeline:this.
 pipeline.toJSON()}},t.Index.prototype.use=function(t){var 
e=Array.prototype.slice.call(arguments,1);e.unshift(this),t.apply(this,e)},t.Store=function(){this.store={},this.length=0},t.Store.load=function(e){var
 n=new this;return 
n.length=e.length,n.store=Object.keys(e.store).reduce(function(n,i){return 
n[i]=t.SortedSet.load(e.store[i]),n},{}),n},t.Store.prototype.set=function(t,e){this.has(t)||this.length++,this.store[t]=e},t.Store.prototype.get=function(t){return
 this.store[t]},t.Store.prototype.has=function(t){return t in 
this.store},t.Store.prototype.remove=function(t){this.has(t)&&(delete 
this.store[t],this.length--)},t.Store.prototype.toJSON=function(){return{store:this.store,length:this.length}},t.stemmer=function(){var
 
t={ational:"ate",tional:"tion",enci:"ence",anci:"ance",izer:"ize",bli:"ble",alli:"al",entli:"ent",eli:"e",ousli:"ous",ization:"ize",ation:"ate",ator:"ate",alism:"al",iveness:"ive",fulness:"ful",ousness:"ous",aliti:"al",iviti:"ive",biliti:"ble",logi:"log"},e={ica
 
te:"ic",ative:"",alize:"al",iciti:"ic",ical:"ic",ful:"",ness:""},n="[^aeiou]",i="[aeiouy]",o=n+"[^aeiouy]*",r=i+"[aeiou]*",s="^("+o+")?"+r+o,a="^("+o+")?"+r+o+"("+r+")?$",h="^("+o+")?"+r+o+r+o,u="^("+o+")?"+i,l=new
 RegExp(s),c=new RegExp(h),p=new RegExp(a),f=new 
RegExp(u),d=/^(.+?)(ss|i)es$/,v=/^(.+?)([^s])s$/,m=/^(.+?)eed$/,g=/^(.+?)(ed|ing)$/,y=/.$/,S=/(at|bl|iz)$/,w=new
 RegExp("([^aeiouylsz])\\1$"),x=new 
RegExp("^"+o+i+"[^aeiouwxy]$"),k=/^(.+?[^aeiou])y$/,b=/^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/,E=/^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/,_=/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/,O=/^(.+?)(s|t)(ion)$/,F=/^(.+?)e$/,P=/ll$/,T=new
 RegExp("^"+o+i+"[^aeiouwxy]$"),$=function(n){var 
i,o,r,s,a,h,u;if(n.length<3)return 
n;if(r=n.substr(0,1),"y"==r&&(n=r.toUpperCase()+n.substr(1)),s=d,a=v,s.test(n)?n=n.replace(s,"$1$2"):a.test(n)&&(n=n.repl
 ace(a,"$1$2")),s=m,a=g,s.test(n)){var 
$=s.exec(n);s=l,s.test($[1])&&(s=y,n=n.replace(s,""))}else if(a.test(n)){var 
$=a.exec(n);i=$[1],a=f,a.test(i)&&(n=i,a=S,h=w,u=x,a.test(n)?n+="e":h.test(n)?(s=y,n=n.replace(s,"")):u.test(n)&&(n+="e"))}if(s=k,s.test(n)){var
 $=s.exec(n);i=$[1],n=i+"i"}if(s=b,s.test(n)){var 
$=s.exec(n);i=$[1],o=$[2],s=l,s.test(i)&&(n=i+t[o])}if(s=E,s.test(n)){var 
$=s.exec(n);i=$[1],o=$[2],s=l,s.test(i)&&(n=i+e[o])}if(s=_,a=O,s.test(n)){var 
$=s.exec(n);i=$[1],s=c,s.test(i)&&(n=i)}else if(a.test(n)){var 
$=a.exec(n);i=$[1]+$[2],a=c,a.test(i)&&(n=i)}if(s=F,s.test(n)){var 
$=s.exec(n);i=$[1],s=c,a=p,h=T,(s.test(i)||a.test(i)&&!h.test(i))&&(n=i)}return 
s=P,a=c,s.test(n)&&a.test(n)&&(s=y,n=n.replace(s,"")),"y"==r&&(n=r.toLowerCase()+n.substr(1)),n};return
 
$}(),t.Pipeline.registerFunction(t.stemmer,"stemmer"),t.stopWordFilter=function(e){return-1===t.stopWordFilter.stopWords.indexOf(e)?e:void
 0},t.stopWordFilter.stopWords=new 
t.SortedSet,t.stopWordFilter.stopWords.length=119
 
,t.stopWordFilter.stopWords.elements=["","a","able","about","across","after","all","almost","also","am","among","an","and","any","are","as","at","be","because","been","but","by","can","cannot","could","dear","did","do","does","either","else","ever","every","for","from","get","got","had","has","have","he","her","hers","him","his","how","however","i","if","in","into","is","it","its","just","least","let","like","likely","may","me","might","most","must","my","neither","no","nor","not","of","off","often","on","only","or","other","our","own","rather","said","say","says","she","should","since","so","some","than","that","the","their","them","then","there","these","they","this","tis","to","too","twas","us","wants","was","we","were","what","when","where","which","while","who","whom","why","will","with","would","yet","you","your"],t.Pipeline.registerFunction(t.stopWordFilter,"stopWordFilter"),t.trimmer=function(t){return
 t.replace(/^\W+/,"").replace(/\W+$/,"")},t.Pipeline.registerFunction(t.tr
 
immer,"trimmer"),t.TokenStore=function(){this.root={docs:{}},this.length=0},t.TokenStore.load=function(t){var
 e=new this;return 
e.root=t.root,e.length=t.length,e},t.TokenStore.prototype.add=function(t,e,n){var
 n=n||this.root,i=t[0],o=t.slice(1);return i in 
n||(n[i]={docs:{}}),0===o.length?(n[i].docs[e.ref]=e,void(this.length+=1)):this.add(o,e,n[i])},t.TokenStore.prototype.has=function(t){if(!t)return!1;for(var
 
e=this.root,n=0;n<t.length;n++){if(!e[t[n]])return!1;e=e[t[n]]}return!0},t.TokenStore.prototype.getNode=function(t){if(!t)return{};for(var
 e=this.root,n=0;n<t.length;n++){if(!e[t[n]])return{};e=e[t[n]]}return 
e},t.TokenStore.prototype.get=function(t,e){return 
this.getNode(t,e).docs||{}},t.TokenStore.prototype.count=function(t,e){return 
Object.keys(this.get(t,e)).length},t.TokenStore.prototype.remove=function(t,e){if(t){for(var
 n=this.root,i=0;i<t.length;i++){if(!(t[i]in n))return;n=n[t[i]]}delete 
n.docs[e]}},t.TokenStore.prototype.expand=function(t,e){var 
n=this.getNode(t),i=n
 .docs||{},e=e||[];return 
Object.keys(i).length&&e.push(t),Object.keys(n).forEach(function(n){"docs"!==n&&e.concat(this.expand(t+n,e))},this),e},t.TokenStore.prototype.toJSON=function(){return{root:this.root,length:this.length}},function(t,e){"function"==typeof
 define&&define.amd?define(e):"object"==typeof 
exports?module.exports=e():t.lunr=e()}(this,function(){return t})}();

Reply via email to