http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/FalconUserExtension.html
----------------------------------------------------------------------
diff --git a/content/0.11/FalconUserExtension.html 
b/content/0.11/FalconUserExtension.html
new file mode 100644
index 0000000..146b28b
--- /dev/null
+++ b/content/0.11/FalconUserExtension.html
@@ -0,0 +1,88 @@
+<!DOCTYPE html>
+<!--
+ | Generated by Apache Maven Doxia at 2018-03-12
+ | Rendered using Apache Maven Fluido Skin 1.3.0
+-->
+<html xmlns="http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="Date-Revision-yyyymmdd" content="20180312" />
+    <meta http-equiv="Content-Language" content="en" />
+    <title>Falcon - </title>
+    <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
+    <link rel="stylesheet" href="./css/site.css" />
+    <link rel="stylesheet" href="./css/print.css" media="print" />
+
+      
+    <script type="text/javascript" 
src="./js/apache-maven-fluido-1.3.0.min.js"></script>
+
+                          
+        
+<script type="text/javascript">$( document ).ready( function() { $( 
'.carousel' ).carousel( { interval: 3500 } ) } );</script>
+          
+            </head>
+        <body class="topBarDisabled">
+          
+                        
+                    
+    
+        <div class="container">
+          <div id="banner">
+        <div class="pull-left">
+                                <div id="bannerLeft">
+                                                                               
                 <img src="images/falcon-logo.png"  alt="Apache Falcon" 
width="200px" height="45px"/>
+                </div>
+                      </div>
+        <div class="pull-right">  </div>
+        <div class="clear"><hr/></div>
+      </div>
+
+      <div id="breadcrumbs">
+        <ul class="breadcrumb">
+                
+                    
+                              <li class="">
+                    <a href="index.html" title="Falcon">
+        Falcon</a>
+        </li>
+      <li class="divider ">/</li>
+        <li class=""></li>
+        
+                
+                    
+                  <li id="publishDate" class="pull-right">Last Published: 
2018-03-12</li> <li class="divider pull-right">|</li>
+              <li id="projectVersion" class="pull-right">Version: 0.11</li>
+            
+                            </ul>
+      </div>
+
+      
+                
+        <div id="bodyColumn" >
+                                  
+            
+                  </div>
+          </div>
+
+    <hr/>
+
+    <footer>
+            <div class="container">
+              <div class="row span12">Copyright &copy;                    
2013-2018
+                        <a href="http://www.apache.org";>Apache Software 
Foundation</a>.
+            All Rights Reserved.      
+                    
+      </div>
+
+                          
+                <p id="poweredBy" class="pull-right">
+                          <a href="http://maven.apache.org/"; title="Built by 
Maven" class="poweredBy">
+        <img class="builtBy" alt="Built by Maven" 
src="./images/logos/maven-feather.png" />
+      </a>
+              </p>
+        
+                </div>
+    </footer>
+  </body>
+</html>

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/FeedSchedule.png
----------------------------------------------------------------------
diff --git a/content/0.11/FeedSchedule.png b/content/0.11/FeedSchedule.png
new file mode 100644
index 0000000..105c6b1
Binary files /dev/null and b/content/0.11/FeedSchedule.png differ

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/HDFSMirroring.html
----------------------------------------------------------------------
diff --git a/content/0.11/HDFSMirroring.html b/content/0.11/HDFSMirroring.html
new file mode 100644
index 0000000..bff8aa8
--- /dev/null
+++ b/content/0.11/HDFSMirroring.html
@@ -0,0 +1,118 @@
+<!DOCTYPE html>
+<!--
+ | Generated by Apache Maven Doxia at 2018-03-12
+ | Rendered using Apache Maven Fluido Skin 1.3.0
+-->
+<html xmlns="http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="Date-Revision-yyyymmdd" content="20180312" />
+    <meta http-equiv="Content-Language" content="en" />
+    <title>Falcon - HDFS mirroring Extension</title>
+    <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
+    <link rel="stylesheet" href="./css/site.css" />
+    <link rel="stylesheet" href="./css/print.css" media="print" />
+
+      
+    <script type="text/javascript" 
src="./js/apache-maven-fluido-1.3.0.min.js"></script>
+
+                          
+        
+<script type="text/javascript">$( document ).ready( function() { $( 
'.carousel' ).carousel( { interval: 3500 } ) } );</script>
+          
+            </head>
+        <body class="topBarDisabled">
+          
+                        
+                    
+    
+        <div class="container">
+          <div id="banner">
+        <div class="pull-left">
+                                <div id="bannerLeft">
+                                                                               
                 <img src="images/falcon-logo.png"  alt="Apache Falcon" 
width="200px" height="45px"/>
+                </div>
+                      </div>
+        <div class="pull-right">  </div>
+        <div class="clear"><hr/></div>
+      </div>
+
+      <div id="breadcrumbs">
+        <ul class="breadcrumb">
+                
+                    
+                              <li class="">
+                    <a href="index.html" title="Falcon">
+        Falcon</a>
+        </li>
+      <li class="divider ">/</li>
+        <li class="">HDFS mirroring Extension</li>
+        
+                
+                    
+                  <li id="publishDate" class="pull-right">Last Published: 
2018-03-12</li> <li class="divider pull-right">|</li>
+              <li id="projectVersion" class="pull-right">Version: 0.11</li>
+            
+                            </ul>
+      </div>
+
+      
+                
+        <div id="bodyColumn" >
+                                  
+            <div class="section">
+<h2>HDFS mirroring Extension<a name="HDFS_mirroring_Extension"></a></h2></div>
+<div class="section">
+<h3>Overview<a name="Overview"></a></h3>
+<p>Falcon supports an HDFS mirroring extension to replicate data from a source 
cluster to a destination cluster. This extension replicates arbitrary 
directories on HDFS and piggybacks on the replication solution in Falcon, which 
uses the <a href="./DistCp.html">DistCp</a> tool. It also allows users to 
replicate data from on-premise storage to the cloud, either Azure WASB or S3.</p></div>
+<div class="section">
+<h3>Use Case<a name="Use_Case"></a></h3>
+<p></p>
+<ul>
+<li>Copy directories between HDFS clusters, without dated partitions.</li>
+<li>Archive directories from HDFS to the cloud, e.g. S3 or Azure 
WASB.</li></ul></div>
+<div class="section">
+<h3>Limitations<a name="Limitations"></a></h3>
+<p>As the data volume and number of files grow, this can get 
inefficient.</p></div>
+<div class="section">
+<h3>Usage<a name="Usage"></a></h3></div>
+<div class="section">
+<h4>Setup source and destination clusters<a 
name="Setup_source_and_destination_clusters"></a></h4>
+<div class="source">
+<pre>
+    $FALCON_HOME/bin/falcon entity -submit -type cluster -file 
/cluster/definition.xml
+   
+</pre></div></div>
+<div class="section">
+<h4>HDFS mirroring extension properties<a 
name="HDFS_mirroring_extension_properties"></a></h4>
+<p>Extension artifacts are expected to be installed on HDFS at the path 
specified by &quot;extension.store.uri&quot; in startup properties. The 
hdfs-mirroring-properties.json file located at 
&quot;&lt;extension.store.uri&gt;/hdfs-mirroring/META/hdfs-mirroring-properties.json&quot;
 lists all the required and optional parameters/arguments for scheduling an 
HDFS mirroring job.</p>
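+<p>For illustration, a properties file might look as follows. The property 
names below are assumptions modeled on the HDFS snapshot mirroring sample; 
treat the JSON file above as the authoritative list.</p>
+<div class="source">
+<pre>
+   ## Job properties (illustrative values)
+   jobName=hdfs-mirroring-test
+   jobClusterName=backupCluster
+   jobValidityStart=2016-01-01T00:00Z
+   jobValidityEnd=2016-04-01T00:00Z
+   jobFrequency=hours(12)
+   jobTimezone=UTC
+
+   ## Source and target directories (assumed property names)
+   sourceDir=/apps/falcon/mirror/source/
+   sourceCluster=primaryCluster
+   targetDir=/apps/falcon/mirror/target/
+   targetCluster=backupCluster
+
+   ## DistCp tuning
+   distcpMaxMaps=1
+   distcpMapBandwidth=100
+</pre></div></div>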
+<div class="section">
+<h4>Submit and schedule HDFS mirroring extension<a 
name="Submit_and_schedule_HDFS_mirroring_extension"></a></h4>
+<div class="source">
+<pre>
+    $FALCON_HOME/bin/falcon extension -submitAndSchedule -extensionName 
hdfs-mirroring -file /process/definition.xml
+   
+</pre></div>
+<p>Please refer to <a href="./Falconcli/FalconCLI.html">Falcon CLI</a> and <a 
href="./Restapi/ResourceList.html">REST API</a> for more details on CLI and 
REST API usage.</p></div>
+                  </div>
+          </div>
+
+    <hr/>
+
+    <footer>
+            <div class="container">
+              <div class="row span12">Copyright &copy;                    
2013-2018
+                        <a href="http://www.apache.org";>Apache Software 
Foundation</a>.
+            All Rights Reserved.      
+                    
+      </div>
+
+                          
+                <p id="poweredBy" class="pull-right">
+                          <a href="http://maven.apache.org/"; title="Built by 
Maven" class="poweredBy">
+        <img class="builtBy" alt="Built by Maven" 
src="./images/logos/maven-feather.png" />
+      </a>
+              </p>
+        
+                </div>
+    </footer>
+  </body>
+</html>

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/HdfsSnapshotMirroring.html
----------------------------------------------------------------------
diff --git a/content/0.11/HdfsSnapshotMirroring.html 
b/content/0.11/HdfsSnapshotMirroring.html
new file mode 100644
index 0000000..63f6d3e
--- /dev/null
+++ b/content/0.11/HdfsSnapshotMirroring.html
@@ -0,0 +1,181 @@
+<!DOCTYPE html>
+<!--
+ | Generated by Apache Maven Doxia at 2018-03-12
+ | Rendered using Apache Maven Fluido Skin 1.3.0
+-->
+<html xmlns="http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="Date-Revision-yyyymmdd" content="20180312" />
+    <meta http-equiv="Content-Language" content="en" />
+    <title>Falcon - HDFS Snapshot based Mirroring</title>
+    <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
+    <link rel="stylesheet" href="./css/site.css" />
+    <link rel="stylesheet" href="./css/print.css" media="print" />
+
+      
+    <script type="text/javascript" 
src="./js/apache-maven-fluido-1.3.0.min.js"></script>
+
+                          
+        
+<script type="text/javascript">$( document ).ready( function() { $( 
'.carousel' ).carousel( { interval: 3500 } ) } );</script>
+          
+            </head>
+        <body class="topBarDisabled">
+          
+                        
+                    
+    
+        <div class="container">
+          <div id="banner">
+        <div class="pull-left">
+                                <div id="bannerLeft">
+                                                                               
                 <img src="images/falcon-logo.png"  alt="Apache Falcon" 
width="200px" height="45px"/>
+                </div>
+                      </div>
+        <div class="pull-right">  </div>
+        <div class="clear"><hr/></div>
+      </div>
+
+      <div id="breadcrumbs">
+        <ul class="breadcrumb">
+                
+                    
+                              <li class="">
+                    <a href="index.html" title="Falcon">
+        Falcon</a>
+        </li>
+      <li class="divider ">/</li>
+        <li class="">HDFS Snapshot based Mirroring</li>
+        
+                
+                    
+                  <li id="publishDate" class="pull-right">Last Published: 
2018-03-12</li> <li class="divider pull-right">|</li>
+              <li id="projectVersion" class="pull-right">Version: 0.11</li>
+            
+                            </ul>
+      </div>
+
+      
+                
+        <div id="bodyColumn" >
+                                  
+            <div class="section">
+<h2>HDFS Snapshot based Mirroring<a 
name="HDFS_Snapshot_based_Mirroring"></a></h2></div>
+<div class="section">
+<h3>Overview<a name="Overview"></a></h3>
+<p>HDFS snapshots are very cost-effective to create (the cost is O(1), 
excluding inode lookup time). Once a snapshot is created, it is very efficient 
to find modifications relative to it and copy them over for disaster recovery 
(DR). This makes for cost-effective HDFS mirroring.</p></div>
+<div class="section">
+<h3>Prerequisites<a name="Prerequisites"></a></h3>
+<p>Following are the prerequisites to use HDFS Snapshot based Mirroring.</p>
+<p></p>
+<ul>
+<li>Hadoop version 2.7.0 or higher.</li>
+<li>The user submitting and scheduling the Falcon snapshot based mirroring job 
must have permission to create and manage snapshots on both the source and 
target directories.</li></ul></div>
+<div class="section">
+<h3>Use Case<a name="Use_Case"></a></h3>
+<p>Create and manage snapshots on source/target directories. Mirror data from 
source to target for disaster recovery using these snapshots. Perform retention 
on the snapshots created on source and target.</p></div>
+<div class="section">
+<h3>Usage<a name="Usage"></a></h3></div>
+<div class="section">
+<h4>Setup<a name="Setup"></a></h4>
+<p></p>
+<ul>
+<li>Submit a source cluster and target cluster entities to Falcon.</li></ul>
+<div class="source">
+<pre>
+    $FALCON_HOME/bin/falcon entity -submit -type cluster -file 
source-cluster-definition.xml
+    $FALCON_HOME/bin/falcon entity -submit -type cluster -file 
target-cluster-definition.xml
+   
+</pre></div>
+<p></p>
+<ul>
+<li>Ensure that the source directory on the source cluster and the target 
directory on the target cluster exist.</li>
+<li>Ensure that these directories are snapshottable by the user submitting the 
extension (see the example below). You can find more <a class="externalLink" 
href="https://hadoop.apache.org/docs/r2.7.2/hadoop-project-dist/hadoop-hdfs/HdfsSnapshots.html">information
 on snapshots here</a>.</li></ul>
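+<p>For example, an HDFS administrator could make both directories 
snapshottable with the following commands (a sketch; the paths are 
illustrative):</p>
+<div class="source">
+<pre>
+    # run as an HDFS administrator on the respective cluster
+    hdfs dfsadmin -allowSnapshot /apps/falcon/snapshots/source/
+    hdfs dfsadmin -allowSnapshot /apps/falcon/snapshots/target/
+</pre></div></div>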
+<div class="section">
+<h4>HDFS Snapshot based mirroring extension properties<a 
name="HDFS_Snapshot_based_mirroring_extension_properties"></a></h4>
+<p>Extension artifacts are expected to be installed on HDFS at the path 
specified by &quot;extension.store.uri&quot; in startup properties. The 
hdfs-snapshot-mirroring-properties.json file located at 
&quot;&lt;extension.store.uri&gt;/hdfs-snapshot-mirroring/META/hdfs-snapshot-mirroring-properties.json&quot;
 lists all the required and optional parameters/arguments for scheduling the 
mirroring job.</p>
+<p>Here is a sample set of properties:</p>
+<div class="source">
+<pre>
+   ## Job Properties
+   jobName=hdfs-snapshot-test
+   jobClusterName=backupCluster
+   jobValidityStart=2016-01-01T00:00Z
+   jobValidityEnd=2016-04-01T00:00Z
+   jobFrequency=hours(12)
+   jobTimezone=UTC
+   jobTags=consumer=consu...@xyz.com
+   jobRetryPolicy=periodic
+   jobRetryDelay=minutes(30)
+   jobRetryAttempts=3
+
+   ## Job owner
+   jobAclOwner=ambari-qa
+   jobAclGroup=users
+   jobAclPermission=*
+
+   ## Source information
+   sourceCluster=primaryCluster
+   sourceSnapshotDir=/apps/falcon/snapshots/source/
+   sourceSnapshotRetentionPolicy=delete
+   sourceSnapshotRetentionAgeLimit=days(15)
+   sourceSnapshotRetentionNumber=10
+
+   ## Target information
+   targetCluster=backupCluster
+   targetSnapshotDir=/apps/falcon/snapshots/target/
+   targetSnapshotRetentionPolicy=delete
+   targetSnapshotRetentionAgeLimit=months(6)
+   targetSnapshotRetentionNumber=20
+
+   ## Distcp properties
+   distcpMaxMaps=1
+   distcpMapBandwidth=100
+   tdeEncryptionEnabled=false
+   
+</pre></div>
+<p>The above properties ensure that the Falcon HDFS snapshot based mirroring 
extension does the following every 12 hours:</p>
+<ul>
+<li>Create a snapshot of the directory /apps/falcon/snapshots/source/ on 
primaryCluster.</li>
+<li>DistCp data from /apps/falcon/snapshots/source/ on primaryCluster to 
/apps/falcon/snapshots/target/ on backupCluster.</li>
+<li>Create a snapshot of the directory /apps/falcon/snapshots/target/ on 
backupCluster.</li>
+<li>Perform the retention job on source and target.
+<ul>
+<li>Maintain at least the N latest snapshots and delete all other snapshots 
older than the specified age limit.</li>
+<li>Today, only the &quot;delete&quot; policy is supported for snapshot 
retention.</li></ul></li></ul>
+<p><b>Note:</b> When TDE encryption is enabled on the source/target 
directories, DistCp ignores the snapshots and treats the job like a regular 
replication. While the user may not get the performance benefit of snapshot 
based DistCp, the extension is still useful for creating and maintaining 
snapshots.</p></div>
+<div class="section">
+<h4>Submit and schedule HDFS snapshot mirroring extension<a 
name="Submit_and_schedule_HDFS_snapshot_mirroring_extension"></a></h4>
+<p>Users can submit the extension using the CLI or the REST API. The CLI 
command looks as follows:</p>
+<div class="source">
+<pre>
+    $FALCON_HOME/bin/falcon extension -submitAndSchedule -extensionName 
hdfs-snapshot-mirroring -file properties-file.txt
+   
+</pre></div>
+<p>Please refer to <a href="./Falconcli/FalconCLI.html">Falcon CLI</a> and <a 
href="./Restapi/ResourceList.html">REST API</a> for more details on CLI and 
REST API usage.</p></div>
+                  </div>
+          </div>
+
+    <hr/>
+
+    <footer>
+            <div class="container">
+              <div class="row span12">Copyright &copy;                    
2013-2018
+                        <a href="http://www.apache.org";>Apache Software 
Foundation</a>.
+            All Rights Reserved.      
+                    
+      </div>
+
+                          
+                <p id="poweredBy" class="pull-right">
+                          <a href="http://maven.apache.org/"; title="Built by 
Maven" class="poweredBy">
+        <img class="builtBy" alt="Built by Maven" 
src="./images/logos/maven-feather.png" />
+      </a>
+              </p>
+        
+                </div>
+    </footer>
+  </body>
+</html>

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/HiveIntegration.html
----------------------------------------------------------------------
diff --git a/content/0.11/HiveIntegration.html 
b/content/0.11/HiveIntegration.html
new file mode 100644
index 0000000..3d5c995
--- /dev/null
+++ b/content/0.11/HiveIntegration.html
@@ -0,0 +1,453 @@
+<!DOCTYPE html>
+<!--
+ | Generated by Apache Maven Doxia at 2018-03-12
+ | Rendered using Apache Maven Fluido Skin 1.3.0
+-->
+<html xmlns="http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="Date-Revision-yyyymmdd" content="20180312" />
+    <meta http-equiv="Content-Language" content="en" />
+    <title>Falcon - Hive Integration</title>
+    <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
+    <link rel="stylesheet" href="./css/site.css" />
+    <link rel="stylesheet" href="./css/print.css" media="print" />
+
+      
+    <script type="text/javascript" 
src="./js/apache-maven-fluido-1.3.0.min.js"></script>
+
+                          
+        
+<script type="text/javascript">$( document ).ready( function() { $( 
'.carousel' ).carousel( { interval: 3500 } ) } );</script>
+          
+            </head>
+        <body class="topBarDisabled">
+          
+                        
+                    
+    
+        <div class="container">
+          <div id="banner">
+        <div class="pull-left">
+                                <div id="bannerLeft">
+                                                                               
                 <img src="images/falcon-logo.png"  alt="Apache Falcon" 
width="200px" height="45px"/>
+                </div>
+                      </div>
+        <div class="pull-right">  </div>
+        <div class="clear"><hr/></div>
+      </div>
+
+      <div id="breadcrumbs">
+        <ul class="breadcrumb">
+                
+                    
+                              <li class="">
+                    <a href="index.html" title="Falcon">
+        Falcon</a>
+        </li>
+      <li class="divider ">/</li>
+        <li class="">Hive Integration</li>
+        
+                
+                    
+                  <li id="publishDate" class="pull-right">Last Published: 
2018-03-12</li> <li class="divider pull-right">|</li>
+              <li id="projectVersion" class="pull-right">Version: 0.11</li>
+            
+                            </ul>
+      </div>
+
+      
+                
+        <div id="bodyColumn" >
+                                  
+            <div class="section">
+<h2>Hive Integration<a name="Hive_Integration"></a></h2></div>
+<div class="section">
+<h3>Overview<a name="Overview"></a></h3>
+<p>Falcon provides data management functions for feeds declaratively. It 
allows users to represent feed locations as time-based partition directories on 
HDFS containing files.</p>
+<p>Hive provides a simple and familiar database-like tabular model of data 
management to its users, backed by HDFS. It supports two classes of tables: 
managed tables and external tables.</p>
+<p>Falcon allows users to represent feed locations as Hive tables. Falcon 
supports both managed and external tables and provides data management services 
for tables such as replication, eviction, archival, etc. Falcon notifies 
HCatalog as a side effect of acquiring, replicating, or evicting a data set 
instance, and adds the missing capability of HCatalog table replication.</p>
+<p>In the near future, Falcon will allow users to express pipeline processing 
in Hive scripts apart from Pig and Oozie workflows.</p></div>
+<div class="section">
+<h3>Assumptions<a name="Assumptions"></a></h3>
+<p></p>
+<ul>
+<li>Date is a mandatory first-level partition for Hive tables.
+<ul>
+<li>Data availability triggers are based on the date pattern in 
Oozie.</li></ul></li>
+<li>Tables must be created in Hive prior to adding them as a Feed in Falcon.
+<ul>
+<li>Duplicating this in Falcon would create confusion about the real source of 
truth. Also, propagating schema changes between systems is a hard 
problem.</li></ul></li>
+<li>Falcon does not know about the encoding of the data; the data should be in 
an HCatalog supported format.</li></ul></div>
+<div class="section">
+<h3>Configuration<a name="Configuration"></a></h3>
+<p>Falcon provides a system level option to enable Hive integration. Falcon 
must be configured with an implementation for the catalog registry. The default 
implementation for Hive is shipped with Falcon.</p>
+<div class="source">
+<pre>
+catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
+
+</pre></div></div>
+<div class="section">
+<h3>Incompatible changes<a name="Incompatible_changes"></a></h3>
+<p>Falcon depends heavily on data-availability triggers for scheduling Falcon 
workflows. Oozie must support data-availability triggers based on HCatalog 
partition availability. This is only available in Oozie 4.x.</p>
+<p>Hence, Hive support in Falcon needs Oozie 4.x.</p></div>
+<div class="section">
+<h3>Oozie Shared Library setup<a name="Oozie_Shared_Library_setup"></a></h3>
+<p>With Hive integration, Falcon depends heavily on the <a class="externalLink" 
href="http://oozie.apache.org/docs/4.0.1/WorkflowFunctionalSpec.html#a17_HDFS_Share_Libraries_for_Workflow_Applications_since_Oozie_2.3">shared
 library feature of Oozie</a>. Since the jars for HCatalog, Pig and Hive number 
in the many tens, it is quite daunting to redistribute the dependent jars from 
Falcon.</p>
+<p><a class="externalLink" 
href="http://oozie.apache.org/docs/4.0.1/DG_QuickStart.html#Oozie_Share_Lib_Installation";>This
 is a one time effort in Oozie setup and is quite straightforward.</a></p></div>
+<div class="section">
+<h3>Approach<a name="Approach"></a></h3></div>
+<div class="section">
+<h4>Entity Changes<a name="Entity_Changes"></a></h4>
+<p></p>
+<ul>
+<li>The Cluster DSL will have an additional registry-interface section, 
specifying the endpoint for the HCatalog server. If this is absent, no HCatalog 
publication will be done from Falcon for this cluster.</li></ul>
+<div class="source">
+<pre>thrift://hcatalog-server:port
+</pre></div>
+<p></p>
+<ul>
+<li>Feed DSL will allow users to specify the URI (location) for HCatalog 
tables as:</li></ul>
+<div class="source">
+<pre>catalog:database_name:table_name#partitions(key=value?)*
+</pre></div>
+<p></p>
+<ul>
+<li>Failure to publish to HCatalog will be retried (a configurable number of 
retries) with back off. Permanent failures after all the retries are exhausted 
will fail the Falcon workflow.</li></ul></div>
+<div class="section">
+<h4>Eviction<a name="Eviction"></a></h4>
+<p></p>
+<ul>
+<li>Falcon will construct DDL statements to filter candidate partitions 
eligible for eviction.</li>
+<li>Falcon will construct DDL statements to drop the eligible partitions.</li>
+<li>Additionally, Falcon will delete the data on HDFS for external 
tables.</li></ul>
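+<p>Conceptually, the generated DDL resembles the following (a sketch; the 
table and partition values are hypothetical):</p>
+<div class="source">
+<pre>
+-- list candidate partitions for the feed's table
+SHOW PARTITIONS falcon_db.clicks;
+
+-- drop a partition that has aged past the retention limit; for external
+-- tables Falcon also deletes the underlying data on HDFS
+ALTER TABLE falcon_db.clicks DROP PARTITION (ds='2013-09-01-00');
+</pre></div></div>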
+<div class="section">
+<h4>Replication<a name="Replication"></a></h4>
+<p></p>
+<ul>
+<li>Falcon will use the HCatalog (Hive) API to export the data for a given 
table and partition, which will result in a data collection that includes 
metadata on the data's storage format, the schema, how the data is sorted, 
what table the data came from, and values of any partition keys from that 
table.</li>
+<li>Falcon will use the distcp tool to copy the exported data collection into 
a staging directory used by Falcon on the secondary cluster.</li>
+<li>Falcon will then import the data into HCatalog (Hive) using the HCatalog 
(Hive) API. If the specified table does not yet exist, Falcon will create it, 
using the information in the imported metadata to set defaults for the table 
such as schema, storage format, etc.</li>
+<li>The partition is not complete, and hence not visible to users, until all 
the data is committed on the secondary cluster (no dirty reads).</li>
+<li>The data collection is staged by Falcon, and retries for the copy continue 
from where it left off.</li>
+<li>Failure to register with Hive will be retried. After all the attempts are 
exhausted, the data will be cleaned up by Falcon.</li></ul>
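+<p>Conceptually, this maps to Hive's export/import statements plus a distcp 
copy, along these lines (a sketch with hypothetical staging paths, not the 
extension's exact internals; the table names come from the feed example later 
on this page):</p>
+<div class="source">
+<pre>
+-- on the source cluster: export one partition with its metadata
+USE src_demo_db;
+EXPORT TABLE customer_raw PARTITION (ds='2013-09-24-00')
+    TO '/apps/falcon/staging/customer_raw/2013-09-24-00';
+
+-- copy the exported collection to the staging dir on the target cluster:
+--   hadoop distcp hdfs://primary/apps/falcon/staging/customer_raw/2013-09-24-00 \
+--                 hdfs://bcp/apps/falcon/staging/customer_raw/2013-09-24-00
+
+-- on the target cluster: import; the table is created if it does not exist
+USE tgt_demo_db;
+IMPORT TABLE customer_bcp PARTITION (ds='2013-09-24-00')
+    FROM '/apps/falcon/staging/customer_raw/2013-09-24-00';
+</pre></div></div>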
+<div class="section">
+<h4>Security<a name="Security"></a></h4>
+<p>The user owns all data managed by Falcon. Falcon runs as the user who 
submitted the feed. Falcon will authenticate with HCatalog as the end user who 
owns the entity and the data.</p>
+<p>For Hive managed tables, the table may be owned by the end user or 
&quot;hive&quot;. For &quot;hive&quot; owned tables, the user will have to 
configure the feed as &quot;hive&quot;.</p></div>
+<div class="section">
+<h3>Load on HCatalog from Falcon<a 
name="Load_on_HCatalog_from_Falcon"></a></h3>
+<p>It generally depends on the frequency of the feeds configured in Falcon and 
how often data is ingested, replicated, or processed.</p></div>
+<div class="section">
+<h3>User Impact<a name="User_Impact"></a></h3>
+<p></p>
+<ul>
+<li>There should not be any impact to users due to this integration.</li>
+<li>Falcon will be fully backwards compatible.</li>
+<li>Users can either keep storage based on files on HDFS, as they do today, 
or use HCatalog for accessing the data in tables.</li></ul></div>
+<div class="section">
+<h3>Known Limitations<a name="Known_Limitations"></a></h3></div>
+<div class="section">
+<h4>Oozie<a name="Oozie"></a></h4>
+<p></p>
+<ul>
+<li>Falcon with Hadoop 1.x requires copying the guava jars manually to the 
sharelib in Oozie. Hadoop 2.x ships them.</li>
+<li>hcatalog-pig-adapter needs to be copied manually to the Oozie 
sharelib.</li></ul>
+<div class="source">
+<pre>
+bin/hadoop dfs -copyFromLocal 
$LFS/share/lib/hcatalog/hcatalog-pig-adapter-0.5.0-incubating.jar 
share/lib/hcatalog
+
+</pre></div>
+<p></p>
+<ul>
+<li>Oozie 4.x with Hadoop 2.x: Replication jobs are submitted to Oozie on the 
destination cluster. Oozie runs a table export job on the ResourceManager on 
the source cluster. The Oozie server on the target cluster must be configured 
with the source Hadoop configs, else jobs fail on both secure and non-secure 
clusters with errors like the one below:</li></ul>
+<div class="source">
+<pre>
+org.apache.hadoop.security.token.SecretManager$InvalidToken: Password not 
found for ApplicationAttempt appattempt_1395965672651_0010_000002
+
+</pre></div>
+<p>Make sure all Oozie servers that Falcon talks to have the Hadoop configs 
configured in oozie-site.xml:</p>
+<div class="source">
+<pre>
+&lt;property&gt;
+      
&lt;name&gt;oozie.service.HadoopAccessorService.hadoop.configurations&lt;/name&gt;
+      
&lt;value&gt;*=/etc/hadoop/conf,arpit-new-falcon-1.cs1cloud.internal:8020=/etc/hadoop-1,arpit-new-falcon-1.cs1cloud.internal:8032=/etc/hadoop-1,arpit-new-falcon-2.cs1cloud.internal:8020=/etc/hadoop-2,arpit-new-falcon-2.cs1cloud.internal:8032=/etc/hadoop-2,arpit-new-falcon-5.cs1cloud.internal:8020=/etc/hadoop-3,arpit-new-falcon-5.cs1cloud.internal:8032=/etc/hadoop-3&lt;/value&gt;
+      &lt;description&gt;
+          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the 
HOST:PORT of
+          the Hadoop service (JobTracker, HDFS). The wildcard '*' 
configuration is
+          used when there is no exact match for an authority. The 
HADOOP_CONF_DIR contains
+          the relevant Hadoop *-site.xml files. If the path is relative, it is 
looked up within
+          the Oozie configuration directory; though the path can be absolute 
(i.e. to point
+          to Hadoop client conf/ directories in the local filesystem).
+      &lt;/description&gt;
+    &lt;/property&gt;
+
+</pre></div></div>
+<div class="section">
+<h4>Hive<a name="Hive"></a></h4>
+<p></p>
+<ul>
+<li>Dated Partitions: Falcon does not work well when a table partition 
contains multiple dated columns. Falcon only works with a single dated 
partition. This is tracked in FALCON-357 and is a limitation in Oozie.</li></ul>
+<div class="source">
+<pre>
+catalog:default:table4#year=${YEAR};month=${MONTH};day=${DAY};hour=${HOUR};minute=${MINUTE}
+
+</pre></div>
+<p></p>
+<ul>
+<li><a class="externalLink" 
href="https://issues.apache.org/jira/browse/HIVE-5550";>Hive table import fails 
for tables created with default text and sequence file formats using HCatalog 
API</a></li></ul>For some arcane reason, hive substitutes the output format for 
text and sequence to be prefixed with Hive. Hive table import fails since it 
compares against the input and output formats of the source table and they are 
different. Say, a table was created with out specifying the file format, it 
defaults to:
+<div class="source">
+<pre>
+fileFormat=TextFile, inputformat=org.apache.hadoop.mapred.TextInputFormat, 
outputformat=org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+
+</pre></div>
+<p>But when Hive fetches the table from the metastore, it replaces the output 
format with org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat and the 
comparison between the source and target table fails.</p>
+<div class="source">
+<pre>
+org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer#checkTable
+      // check IF/OF/Serde
+      String existingifc = table.getInputFormatClass().getName();
+      String importedifc = tableDesc.getInputFormat();
+      String existingofc = table.getOutputFormatClass().getName();
+      String importedofc = tableDesc.getOutputFormat();
+      if ((!existingifc.equals(importedifc))
+          || (!existingofc.equals(importedofc))) {
+        throw new SemanticException(
+            ErrorMsg.INCOMPATIBLE_SCHEMA
+                .getMsg(&quot; Table inputformat/outputformats do not 
match&quot;));
+      }
+
+</pre></div>
+<p>The above is not an issue with Hive 0.13.</p></div>
+<div class="section">
+<h3>Hive Examples<a name="Hive_Examples"></a></h3>
+<p>Following is an example entity configuration for lifecycle management 
functions for tables in Hive.</p></div>
+<div class="section">
+<h4>Hive Table Lifecycle Management - Replication and Retention<a 
name="Hive_Table_Lifecycle_Management_-_Replication_and_Retention"></a></h4></div>
+<div class="section">
+<h5>Primary Cluster<a name="Primary_Cluster"></a></h5>
+<div class="source">
+<pre>
+&lt;?xml version=&quot;1.0&quot;?&gt;
+&lt;!--
+    Primary cluster configuration for demo vm
+  --&gt;
+&lt;cluster colo=&quot;west-coast&quot; description=&quot;Primary Cluster&quot;
+         name=&quot;primary-cluster&quot;
+         xmlns=&quot;uri:falcon:cluster:0.1&quot; 
xmlns:xsi=&quot;http://www.w3.org/2001/XMLSchema-instance&quot;&gt;
+    &lt;interfaces&gt;
+        &lt;interface type=&quot;readonly&quot; 
endpoint=&quot;hftp://localhost:10070&quot;
+                   version=&quot;1.1.1&quot; /&gt;
+        &lt;interface type=&quot;write&quot; 
endpoint=&quot;hdfs://localhost:10020&quot;
+                   version=&quot;1.1.1&quot; /&gt;
+        &lt;interface type=&quot;execute&quot; 
endpoint=&quot;localhost:10300&quot;
+                   version=&quot;1.1.1&quot; /&gt;
+        &lt;interface type=&quot;workflow&quot; 
endpoint=&quot;http://localhost:11010/oozie/&quot;
+                   version=&quot;4.0.1&quot; /&gt;
+        &lt;interface type=&quot;registry&quot; 
endpoint=&quot;thrift://localhost:19083&quot;
+                   version=&quot;0.11.0&quot; /&gt;
+        &lt;interface type=&quot;messaging&quot; 
endpoint=&quot;tcp://localhost:61616?daemon=true&quot;
+                   version=&quot;5.4.3&quot; /&gt;
+    &lt;/interfaces&gt;
+    &lt;locations&gt;
+        &lt;location name=&quot;staging&quot; 
path=&quot;/apps/falcon/staging&quot; /&gt;
+        &lt;location name=&quot;temp&quot; path=&quot;/tmp&quot; /&gt;
+        &lt;location name=&quot;working&quot; 
path=&quot;/apps/falcon/working&quot; /&gt;
+    &lt;/locations&gt;
+&lt;/cluster&gt;
+
+</pre></div></div>
+<div class="section">
+<h5>BCP Cluster<a name="BCP_Cluster"></a></h5>
+<div class="source">
+<pre>
+&lt;?xml version=&quot;1.0&quot;?&gt;
+&lt;!--
+    BCP cluster configuration for demo vm
+  --&gt;
+&lt;cluster colo=&quot;east-coast&quot; description=&quot;BCP Cluster&quot;
+         name=&quot;bcp-cluster&quot;
+         xmlns=&quot;uri:falcon:cluster:0.1&quot; 
xmlns:xsi=&quot;http://www.w3.org/2001/XMLSchema-instance&quot;&gt;
+    &lt;interfaces&gt;
+        &lt;interface type=&quot;readonly&quot; 
endpoint=&quot;hftp://localhost:20070&quot;
+                   version=&quot;1.1.1&quot; /&gt;
+        &lt;interface type=&quot;write&quot; 
endpoint=&quot;hdfs://localhost:20020&quot;
+                   version=&quot;1.1.1&quot; /&gt;
+        &lt;interface type=&quot;execute&quot; 
endpoint=&quot;localhost:20300&quot;
+                   version=&quot;1.1.1&quot; /&gt;
+        &lt;interface type=&quot;workflow&quot; 
endpoint=&quot;http://localhost:11020/oozie/&quot;
+                   version=&quot;4.0.1&quot; /&gt;
+        &lt;interface type=&quot;registry&quot; 
endpoint=&quot;thrift://localhost:29083&quot;
+                   version=&quot;0.11.0&quot; /&gt;
+        &lt;interface type=&quot;messaging&quot; 
endpoint=&quot;tcp://localhost:61616?daemon=true&quot;
+                   version=&quot;5.4.3&quot; /&gt;
+    &lt;/interfaces&gt;
+    &lt;locations&gt;
+        &lt;location name=&quot;staging&quot; 
path=&quot;/apps/falcon/staging&quot; /&gt;
+        &lt;location name=&quot;temp&quot; path=&quot;/tmp&quot; /&gt;
+        &lt;location name=&quot;working&quot; 
path=&quot;/apps/falcon/working&quot; /&gt;
+    &lt;/locations&gt;
+&lt;/cluster&gt;
+
+</pre></div></div>
+<div class="section">
+<h5>Feed with replication and eviction policy<a 
name="Feed_with_replication_and_eviction_policy"></a></h5>
+<div class="source">
+<pre>
+&lt;?xml version=&quot;1.0&quot;?&gt;
+&lt;!--
+    Replicating Hourly customer table from primary to secondary cluster.
+  --&gt;
+&lt;feed description=&quot;Replicating customer table feed&quot; 
name=&quot;customer-table-replicating-feed&quot;
+      xmlns=&quot;uri:falcon:feed:0.1&quot;&gt;
+    &lt;frequency&gt;hours(1)&lt;/frequency&gt;
+    &lt;timezone&gt;UTC&lt;/timezone&gt;
+
+    &lt;clusters&gt;
+        &lt;cluster name=&quot;primary-cluster&quot; 
type=&quot;source&quot;&gt;
+            &lt;validity start=&quot;2013-09-24T00:00Z&quot; 
end=&quot;2013-10-26T00:00Z&quot;/&gt;
+            &lt;retention limit=&quot;hours(2)&quot; 
action=&quot;delete&quot;/&gt;
+        &lt;/cluster&gt;
+        &lt;cluster name=&quot;bcp-cluster&quot; type=&quot;target&quot;&gt;
+            &lt;validity start=&quot;2013-09-24T00:00Z&quot; 
end=&quot;2013-10-26T00:00Z&quot;/&gt;
+            &lt;retention limit=&quot;days(30)&quot; 
action=&quot;delete&quot;/&gt;
+
+            &lt;table 
uri=&quot;catalog:tgt_demo_db:customer_bcp#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}&quot;
 /&gt;
+        &lt;/cluster&gt;
+    &lt;/clusters&gt;
+
+    &lt;table 
uri=&quot;catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}&quot;
 /&gt;
+
+    &lt;ACL owner=&quot;seetharam&quot; group=&quot;users&quot; 
permission=&quot;0755&quot;/&gt;
+    &lt;schema location=&quot;&quot; provider=&quot;hcatalog&quot;/&gt;
+&lt;/feed&gt;
+
+</pre></div></div>
+<div class="section">
+<h4>Hive Table used in Processing Pipelines<a 
name="Hive_Table_used_in_Processing_Pipelines"></a></h4></div>
+<div class="section">
+<h5>Primary Cluster<a name="Primary_Cluster"></a></h5>
+<p>The cluster definition from the lifecycle example can be used.</p></div>
+<div class="section">
+<h5>Input Feed<a name="Input_Feed"></a></h5>
+<div class="source">
+<pre>
+&lt;?xml version=&quot;1.0&quot;?&gt;
+&lt;feed description=&quot;clicks log table &quot; 
name=&quot;input-table&quot; xmlns=&quot;uri:falcon:feed:0.1&quot;&gt;
+    &lt;groups&gt;online,bi&lt;/groups&gt;
+    &lt;frequency&gt;hours(1)&lt;/frequency&gt;
+    &lt;timezone&gt;UTC&lt;/timezone&gt;
+
+    &lt;clusters&gt;
+        &lt;cluster name=&quot;##cluster##&quot; type=&quot;source&quot;&gt;
+            &lt;validity start=&quot;2010-01-01T00:00Z&quot; 
end=&quot;2012-04-21T00:00Z&quot;/&gt;
+            &lt;retention limit=&quot;hours(24)&quot; 
action=&quot;delete&quot;/&gt;
+        &lt;/cluster&gt;
+    &lt;/clusters&gt;
+
+    &lt;table 
uri=&quot;catalog:falcon_db:input_table#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}&quot;
 /&gt;
+
+    &lt;ACL owner=&quot;testuser&quot; group=&quot;group&quot; 
permission=&quot;0x755&quot;/&gt;
+    &lt;schema location=&quot;/schema/clicks&quot; 
provider=&quot;protobuf&quot;/&gt;
+&lt;/feed&gt;
+
+</pre></div></div>
+<div class="section">
+<h5>Output Feed<a name="Output_Feed"></a></h5>
+<div class="source">
+<pre>
+&lt;?xml version=&quot;1.0&quot;?&gt;
+&lt;feed description=&quot;clicks log identity table&quot; 
name=&quot;output-table&quot; xmlns=&quot;uri:falcon:feed:0.1&quot;&gt;
+    &lt;groups&gt;online,bi&lt;/groups&gt;
+    &lt;frequency&gt;hours(1)&lt;/frequency&gt;
+    &lt;timezone&gt;UTC&lt;/timezone&gt;
+
+    &lt;clusters&gt;
+        &lt;cluster name=&quot;##cluster##&quot; type=&quot;source&quot;&gt;
+            &lt;validity start=&quot;2010-01-01T00:00Z&quot; 
end=&quot;2012-04-21T00:00Z&quot;/&gt;
+            &lt;retention limit=&quot;hours(24)&quot; 
action=&quot;delete&quot;/&gt;
+        &lt;/cluster&gt;
+    &lt;/clusters&gt;
+
+    &lt;table 
uri=&quot;catalog:falcon_db:output_table#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}&quot;
 /&gt;
+
+    &lt;ACL owner=&quot;testuser&quot; group=&quot;group&quot; 
permission=&quot;0x755&quot;/&gt;
+    &lt;schema location=&quot;/schema/clicks&quot; 
provider=&quot;protobuf&quot;/&gt;
+&lt;/feed&gt;
+
+</pre></div></div>
+<div class="section">
+<h5>Process<a name="Process"></a></h5>
+<div class="source">
+<pre>
+&lt;?xml version=&quot;1.0&quot;?&gt;
+&lt;process name=&quot;##processName##&quot; 
xmlns=&quot;uri:falcon:process:0.1&quot;&gt;
+    &lt;clusters&gt;
+        &lt;cluster name=&quot;##cluster##&quot;&gt;
+            &lt;validity end=&quot;2012-04-22T00:00Z&quot; 
start=&quot;2012-04-21T00:00Z&quot;/&gt;
+        &lt;/cluster&gt;
+    &lt;/clusters&gt;
+
+    &lt;parallel&gt;1&lt;/parallel&gt;
+    &lt;order&gt;FIFO&lt;/order&gt;
+    &lt;frequency&gt;days(1)&lt;/frequency&gt;
+    &lt;timezone&gt;UTC&lt;/timezone&gt;
+
+    &lt;inputs&gt;
+        &lt;input end=&quot;today(0,0)&quot; start=&quot;today(0,0)&quot; 
feed=&quot;input-table&quot; name=&quot;input&quot;/&gt;
+    &lt;/inputs&gt;
+
+    &lt;outputs&gt;
+        &lt;output instance=&quot;now(0,0)&quot; feed=&quot;output-table&quot; 
name=&quot;output&quot;/&gt;
+    &lt;/outputs&gt;
+
+    &lt;properties&gt;
+        &lt;property name=&quot;blah&quot; value=&quot;blah&quot;/&gt;
+    &lt;/properties&gt;
+
+    &lt;workflow engine=&quot;pig&quot; 
path=&quot;/falcon/test/apps/pig/table-id.pig&quot;/&gt;
+
+    &lt;retry policy=&quot;periodic&quot; delay=&quot;minutes(10)&quot; 
attempts=&quot;3&quot;/&gt;
+&lt;/process&gt;
+
+</pre></div></div>
+<div class="section">
+<h5>Pig Script<a name="Pig_Script"></a></h5>
+<div class="source">
+<pre>
+A = load '$input_database.$input_table' using 
org.apache.hcatalog.pig.HCatLoader();
+B = FILTER A BY $input_filter;
+C = foreach B generate id, value;
+store C into '$output_database.$output_table' USING 
org.apache.hcatalog.pig.HCatStorer('$output_dataout_partitions');
+
+</pre></div></div>
+                  </div>
+          </div>
+
+    <hr/>
+
+    <footer>
+            <div class="container">
+              <div class="row span12">Copyright &copy;                    
2013-2018
+                        <a href="http://www.apache.org";>Apache Software 
Foundation</a>.
+            All Rights Reserved.      
+                    
+      </div>
+
+                          
+                <p id="poweredBy" class="pull-right">
+                          <a href="http://maven.apache.org/"; title="Built by 
Maven" class="poweredBy">
+        <img class="builtBy" alt="Built by Maven" 
src="./images/logos/maven-feather.png" />
+      </a>
+              </p>
+        
+                </div>
+    </footer>
+  </body>
+</html>

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/HiveMirroring.html
----------------------------------------------------------------------
diff --git a/content/0.11/HiveMirroring.html b/content/0.11/HiveMirroring.html
new file mode 100644
index 0000000..041f3d6
--- /dev/null
+++ b/content/0.11/HiveMirroring.html
@@ -0,0 +1,148 @@
+<!DOCTYPE html>
+<!--
+ | Generated by Apache Maven Doxia at 2018-03-12
+ | Rendered using Apache Maven Fluido Skin 1.3.0
+-->
+<html xmlns="http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="Date-Revision-yyyymmdd" content="20180312" />
+    <meta http-equiv="Content-Language" content="en" />
+    <title>Falcon - Hive Mirroring</title>
+    <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
+    <link rel="stylesheet" href="./css/site.css" />
+    <link rel="stylesheet" href="./css/print.css" media="print" />
+
+      
+    <script type="text/javascript" 
src="./js/apache-maven-fluido-1.3.0.min.js"></script>
+
+                          
+        
+<script type="text/javascript">$( document ).ready( function() { $( 
'.carousel' ).carousel( { interval: 3500 } ) } );</script>
+          
+            </head>
+        <body class="topBarDisabled">
+          
+                        
+                    
+    
+        <div class="container">
+          <div id="banner">
+        <div class="pull-left">
+                                <div id="bannerLeft">
+                                                                               
                 <img src="images/falcon-logo.png"  alt="Apache Falcon" 
width="200px" height="45px"/>
+                </div>
+                      </div>
+        <div class="pull-right">  </div>
+        <div class="clear"><hr/></div>
+      </div>
+
+      <div id="breadcrumbs">
+        <ul class="breadcrumb">
+                
+                    
+                              <li class="">
+                    <a href="index.html" title="Falcon">
+        Falcon</a>
+        </li>
+      <li class="divider ">/</li>
+        <li class="">Hive Mirroring</li>
+        
+                
+                    
+                  <li id="publishDate" class="pull-right">Last Published: 
2018-03-12</li> <li class="divider pull-right">|</li>
+              <li id="projectVersion" class="pull-right">Version: 0.11</li>
+            
+                            </ul>
+      </div>
+
+      
+                
+        <div id="bodyColumn" >
+                                  
+            <div class="section">
+<h2>Hive Mirroring<a name="Hive_Mirroring"></a></h2></div>
+<div class="section">
+<h3>Overview<a name="Overview"></a></h3>
+<p>Falcon provides a feature to replicate Hive metadata and data events from a 
source cluster to a destination cluster. This is supported for both secure and 
unsecure clusters through Falcon extensions.</p></div>
+<div class="section">
+<h3>Prerequisites<a name="Prerequisites"></a></h3>
+<p>Following are the prerequisites to use Hive Mirroring.</p>
+<p></p>
+<ul>
+<li><b>Hive 1.2.0+</b></li>
+<li><b>Oozie 4.2.0+</b></li></ul>
+<p><b>Note:</b> Set the following properties in hive-site.xml for replicating 
Hive events on the source and destination Hive clusters:</p>
+<div class="source">
+<pre>
+    &lt;property&gt;
+        &lt;name&gt;hive.metastore.event.listeners&lt;/name&gt;
+        
&lt;value&gt;org.apache.hive.hcatalog.listener.DbNotificationListener&lt;/value&gt;
+        &lt;description&gt;event listeners that are notified of any metastore 
changes&lt;/description&gt;
+    &lt;/property&gt;
+
+    &lt;property&gt;
+        &lt;name&gt;hive.metastore.dml.events&lt;/name&gt;
+        &lt;value&gt;true&lt;/value&gt;
+    &lt;/property&gt;
+
+</pre></div></div>
+<div class="section">
+<h3>Use Case<a name="Use_Case"></a></h3>
+<p></p>
+<ul>
+<li>Replicate data/metadata of a Hive DB &amp; table from a source to a target 
cluster.</li></ul></div>
+<div class="section">
+<h3>Limitations<a name="Limitations"></a></h3>
+<p></p>
+<ul>
+<li>Currently Hive does not support replication events for create database, 
roles, views, offline tables, direct HDFS writes without registering with the 
metastore, and database/table name mappings. Hence the Hive mirroring extension 
cannot be used to replicate the above mentioned events between 
warehouses.</li></ul></div>
+<div class="section">
+<h3>Usage<a name="Usage"></a></h3></div>
+<div class="section">
+<h4>Bootstrap<a name="Bootstrap"></a></h4>
+<p>Perform an initial bootstrap of tables and databases from the source 
cluster to the destination cluster.</p>
+<ul>
+<li><b>Database Bootstrap</b>: For bootstrapping DB replication, the 
destination DB should be created first. This step is expected, since DB 
replication definitions can be set up by users only on pre-existing DBs. 
Second, export all tables in the source DB and import them in the destination 
DB, as described under Table Bootstrap.</li>
+<li><b>Table Bootstrap</b>: For bootstrapping table replication, after having 
turned on the DbNotificationListener on the source DB, perform an export of 
the table, distcp the export over to the destination warehouse and do an 
import over there; see the sketch below. Check the <a class="externalLink" 
href="https://cwiki.apache.org/confluence/display/Hive/LanguageManual+ImportExport">Hive
 Export-Import</a> documentation for syntax details and examples. This will 
set up the destination table so that events on the source cluster that modify 
the table will then be replicated.</li></ul>
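+<p>A minimal bootstrap sketch under these assumptions (the database, table 
and staging path names are hypothetical):</p>
+<div class="source">
+<pre>
+-- on the source warehouse: export the table with its metadata
+USE sales_db;
+EXPORT TABLE orders TO '/apps/hive/staging/orders_bootstrap';
+
+-- copy the export to the destination cluster:
+--   hadoop distcp hdfs://source/apps/hive/staging/orders_bootstrap \
+--                 hdfs://target/apps/hive/staging/orders_bootstrap
+
+-- on the destination warehouse: import into the pre-created database
+USE sales_db;
+IMPORT TABLE orders FROM '/apps/hive/staging/orders_bootstrap';
+</pre></div></div>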
+<div class="section">
+<h4>Setup source and destination clusters<a 
name="Setup_source_and_destination_clusters"></a></h4>
+<div class="source">
+<pre>
+    $FALCON_HOME/bin/falcon entity -submit -type cluster -file 
/cluster/definition.xml
+   
+</pre></div></div>
+<div class="section">
+<h4>Hive mirroring extension properties<a 
name="Hive_mirroring_extension_properties"></a></h4>
+<p>Extension artifacts are expected to be installed on HDFS at the path 
specified by &quot;extension.store.uri&quot; in startup properties. The 
hive-mirroring-properties.json file located at 
&quot;&lt;extension.store.uri&gt;/hive-mirroring/META/hive-mirroring-properties.json&quot;
 lists all the required and optional parameters/arguments for scheduling a Hive 
mirroring job.</p>
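+<p>For illustration, a properties file might look as follows. The names below 
are assumptions modeled on the HDFS snapshot mirroring sample; the JSON file 
above is the authoritative list.</p>
+<div class="source">
+<pre>
+   ## Job properties (illustrative values)
+   jobName=hive-mirroring-test
+   jobClusterName=backupCluster
+   jobValidityStart=2016-01-01T00:00Z
+   jobValidityEnd=2016-04-01T00:00Z
+   jobFrequency=hours(12)
+   jobTimezone=UTC
+
+   ## Source and target (assumed property names)
+   sourceCluster=primaryCluster
+   sourceDatabase=sales_db
+   sourceTable=*
+   targetCluster=backupCluster
+
+   ## DistCp tuning
+   distcpMaxMaps=1
+   distcpMapBandwidth=100
+</pre></div></div>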
+<div class="section">
+<h4>Submit and schedule Hive mirroring extension<a 
name="Submit_and_schedule_Hive_mirroring_extension"></a></h4>
+<div class="source">
+<pre>
+    $FALCON_HOME/bin/falcon extension -submitAndSchedule -extensionName 
hive-mirroring -file /process/definition.xml
+   
+</pre></div>
+<p>Please refer to <a href="./Falconcli/FalconCLI.html">Falcon CLI</a> and <a 
href="./Restapi/ResourceList.html">REST API</a> for more details on CLI and 
REST API usage.</p></div>
+                  </div>
+          </div>
+
+    <hr/>
+
+    <footer>
+            <div class="container">
+              <div class="row span12">Copyright &copy;                    
2013-2018
+                        <a href="http://www.apache.org";>Apache Software 
Foundation</a>.
+            All Rights Reserved.      
+                    
+      </div>
+
+                          
+                <p id="poweredBy" class="pull-right">
+                          <a href="http://maven.apache.org/"; title="Built by 
Maven" class="poweredBy">
+        <img class="builtBy" alt="Built by Maven" 
src="./images/logos/maven-feather.png" />
+      </a>
+              </p>
+        
+                </div>
+    </footer>
+  </body>
+</html>

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/ImportExport.html
----------------------------------------------------------------------
diff --git a/content/0.11/ImportExport.html b/content/0.11/ImportExport.html
new file mode 100644
index 0000000..618d783
--- /dev/null
+++ b/content/0.11/ImportExport.html
@@ -0,0 +1,294 @@
+<!DOCTYPE html>
+<!--
+ | Generated by Apache Maven Doxia at 2018-03-12
+ | Rendered using Apache Maven Fluido Skin 1.3.0
+-->
+<html xmlns="http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="Date-Revision-yyyymmdd" content="20180312" />
+    <meta http-equiv="Content-Language" content="en" />
+    <title>Falcon - Falcon Data Import and Export</title>
+    <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
+    <link rel="stylesheet" href="./css/site.css" />
+    <link rel="stylesheet" href="./css/print.css" media="print" />
+
+      
+    <script type="text/javascript" 
src="./js/apache-maven-fluido-1.3.0.min.js"></script>
+
+                          
+        
+<script type="text/javascript">$( document ).ready( function() { $( 
'.carousel' ).carousel( { interval: 3500 } ) } );</script>
+          
+            </head>
+        <body class="topBarDisabled">
+          
+                        
+                    
+    
+        <div class="container">
+          <div id="banner">
+        <div class="pull-left">
+                                <div id="bannerLeft">
+                                                                               
                 <img src="images/falcon-logo.png"  alt="Apache Falcon" 
width="200px" height="45px"/>
+                </div>
+                      </div>
+        <div class="pull-right">  </div>
+        <div class="clear"><hr/></div>
+      </div>
+
+      <div id="breadcrumbs">
+        <ul class="breadcrumb">
+                
+                    
+                              <li class="">
+                    <a href="index.html" title="Falcon">
+        Falcon</a>
+        </li>
+      <li class="divider ">/</li>
+        <li class="">Falcon Data Import and Export</li>
+        
+                
+                    
+                  <li id="publishDate" class="pull-right">Last Published: 
2018-03-12</li> <li class="divider pull-right">|</li>
+              <li id="projectVersion" class="pull-right">Version: 0.11</li>
+            
+                            </ul>
+      </div>
+
+      
+                
+        <div id="bodyColumn" >
+                                  
+            <div class="section">
+<h2>Falcon Data Import and Export<a 
name="Falcon_Data_Import_and_Export"></a></h2></div>
+<div class="section">
+<h3>Overview<a name="Overview"></a></h3>
+<p>Falcon provides constructs to periodically bring raw data from external 
data sources (like databases, drop boxes etc) onto Hadoop and push derived data 
computed on Hadoop onto external data sources.</p>
+<p>As of this release, Falcon only supports relational databases (e.g. Oracle, 
MySQL) via JDBC as external data sources. Future releases will add support for 
other external data sources.</p></div>
+<div class="section">
+<h3>Prerequisites<a name="Prerequisites"></a></h3>
+<p>Following are the prerequisites to import data from and export data to 
external databases.</p>
+<p></p>
+<ul>
+<li><b>Sqoop 1.4.6+</b></li>
+<li><b>Oozie 4.2.0+</b></li>
+<li><b>Appropriate database connector</b></li></ul>
+<p><b>Note:</b> Falcon uses Sqoop for import/export operations. Sqoop requires 
the appropriate database driver to connect to the relational database. Please 
refer to the Sqoop documentation for any Sqoop related questions. Please make 
sure the database driver jar is copied into the Oozie share lib for Sqoop.</p>
+<div class="source">
+<pre>
For example, in order to import and export with MySQL, please make sure the 
latest MySQL connector
+mysql-connector-java-5.1.31.jar is copied into Oozie's Sqoop share lib
+
+/user/oozie/share/lib/{lib-dir}/sqoop/mysql-connector-java-5.1.31.jar
+
+where the {lib-dir} value varies across Oozie deployments.
+
+</pre></div>
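+<p>A sketch of the copy step (shell), assuming the connector jar is available 
locally and {lib-dir} has been resolved for your deployment:</p>
+<div class="source">
+<pre>
+    # copy the JDBC driver into the Oozie Sqoop sharelib on HDFS
+    hadoop fs -put mysql-connector-java-5.1.31.jar \
+        /user/oozie/share/lib/{lib-dir}/sqoop/
+
+    # ask the Oozie server to pick up the updated sharelib
+    oozie admin -oozie http://oozie-host:11000/oozie -sharelibupdate
+</pre></div></div>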
+<div class="section">
+<h3>Usage<a name="Usage"></a></h3></div>
+<div class="section">
+<h4>Entity Definition and Setup<a name="Entity_Definition_and_Setup"></a></h4>
+<p></p>
+<ul>
+<li><b>Datasource Entity</b>: The Datasource entity abstracts connection and 
credential details for external data sources. It supports read and write 
interfaces with specific credentials. The default credential will be used if 
the read or write interface does not have its own credentials. In general, the 
Datasource entity will be defined by a system administrator. Please refer to 
the datasource XSD for more details.</li></ul>
+<p>The following example defines a Datasource entity for a MySQL database. The 
import operation will use the read interface with url 
&quot;jdbc:mysql://dbhost/test&quot;, user name &quot;import_usr&quot; and 
password text &quot;sqoop&quot;. Whereas, the export operation will use the 
write interface with url &quot;jdbc:mysql://dbhost/test&quot;, user name 
&quot;export_usr&quot; and a password specified in an HDFS file at the location 
&quot;/user/ambari-qa/password-store/password_write_user&quot;.</p>
+<p>The specified default credential will be used if either the read or write 
interface does not provide its own credentials. The default credential 
specifies the password using the password alias feature available via the 
Hadoop credential functionality. Users can create a password alias using the 
&quot;hadoop credential -create &lt;alias&gt; -provider 
&lt;provider-path&gt;&quot; command, where &lt;alias&gt; is a string and 
&lt;provider-path&gt; is an HDFS jceks file. During runtime, the specified 
alias will be used to look up the password stored encrypted in the jceks HDFS 
file specified under the providerPath element.</p>
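+<p>A sketch of creating such an alias (assuming a recent Hadoop CLI, where the 
subcommand is &quot;create&quot;; the alias and provider path match the sample 
entity below):</p>
+<div class="source">
+<pre>
+    # stores the password encrypted in a jceks file on HDFS; prompts for it
+    hadoop credential create sqoop.password.alias \
+        -provider jceks://hdfs@namenode:8020/user/ambari-qa/sqoop_password.jceks
+</pre></div>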
+<p>The available read and write interfaces enable database administrators to 
segregate read and write workloads.</p>
+<div class="source">
+<pre>
+
+      File: mysql-database.xml
+
+      &lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot;?&gt;
+      &lt;datasource colo=&quot;west-coast&quot; description=&quot;MySQL 
database on west coast&quot; type=&quot;mysql&quot; name=&quot;mysql-db&quot; 
xmlns=&quot;uri:falcon:datasource:0.1&quot;&gt;
+          &lt;tags&gt;owner=foo...@ambari.apache.org, 
consumer=p...@ambari.apache.org&lt;/tags&gt;
+          &lt;interfaces&gt;
+              &lt;!-- ***** read interface ***** --&gt;
+              &lt;interface type=&quot;readonly&quot; 
endpoint=&quot;jdbc:mysql://dbhost/test&quot;&gt;
+                  &lt;credential type=&quot;password-text&quot;&gt;
+                      &lt;userName&gt;import_usr&lt;/userName&gt;
+                      &lt;passwordText&gt;sqoop&lt;/passwordText&gt;
+                  &lt;/credential&gt;
+              &lt;/interface&gt;
+
+              &lt;!-- ***** write interface ***** --&gt;
+              &lt;interface type=&quot;write&quot;  
endpoint=&quot;jdbc:mysql://dbhost/test&quot;&gt;
+                  &lt;credential type=&quot;password-file&quot;&gt;
+                      &lt;userName&gt;export_usr&lt;/userName&gt;
+                      
&lt;passwordFile&gt;/user/ambari-qa/password-store/password_write_user&lt;/passwordFile&gt;
+                  &lt;/credential&gt;
+              &lt;/interface&gt;
+
+              &lt;!-- *** default credential *** --&gt;
+              &lt;credential type=&quot;password-alias&quot;&gt;
+                &lt;userName&gt;sqoop2_user&lt;/userName&gt;
+                &lt;passwordAlias&gt;
+                    &lt;alias&gt;sqoop.password.alias&lt;/alias&gt;
+                    
&lt;providerPath&gt;hdfs://namenode:8020/user/ambari-qa/sqoop_password.jceks&lt;/providerPath&gt;
+                &lt;/passwordAlias&gt;
+              &lt;/credential&gt;
+
+          &lt;/interfaces&gt;
+
+          &lt;driver&gt;
+              &lt;clazz&gt;com.mysql.jdbc.Driver&lt;/clazz&gt;
+              
&lt;jar&gt;/user/oozie/share/lib/lib_20150721010816/sqoop/mysql-connector-java-5.1.31.jar&lt;/jar&gt;
+          &lt;/driver&gt;
+      &lt;/datasource&gt;
+      
+</pre></div>
+<p></p>
+<ul>
+<li><b>Feed Entity</b></li></ul>The Feed entity now enables users to define IMPORT and EXPORT policies in addition to RETENTION and REPLICATION. The IMPORT and EXPORT policies refer to an already defined Datasource entity for connection and credential details, and take a table name from the policy to operate on. Please refer to the feed entity XSD for details.
+<p>The following example defines a Feed entity with IMPORT and EXPORT policies. Both the IMPORT and EXPORT operations refer to the datasource entity &quot;mysql-db&quot;. The IMPORT operation will use the read interface and credentials, while the EXPORT operation will use the write interface and credentials. A feed instance is created every hour since the frequency of the Feed is hours(1), and the Feed instances are deleted after 90 days because of the retention policy.</p>
+<p>The feed's data location should include some combination of time partitions (like ${YEAR}, ${MONTH}, ${DAY}, ${HOUR}, ${MINUTE}, etc.) if an import or export policy is associated.</p>
+<div class="source">
+<pre>
+
+      File: customer_email_feed.xml
+
+      &lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot;?&gt;
+      &lt;!--
+       A feed representing Hourly customer email data retained for 90 days
+       --&gt;
+      &lt;feed description=&quot;Raw customer email feed&quot; 
name=&quot;customer_feed&quot; xmlns=&quot;uri:falcon:feed:0.1&quot;&gt;
+          
&lt;tags&gt;externalSystem=USWestEmailServers,classification=secure&lt;/tags&gt;
+          &lt;groups&gt;DataImportPipeline&lt;/groups&gt;
+          &lt;frequency&gt;hours(1)&lt;/frequency&gt;
+          &lt;late-arrival cut-off=&quot;hours(4)&quot;/&gt;
+          &lt;clusters&gt;
+              &lt;cluster name=&quot;primaryCluster&quot; 
type=&quot;source&quot;&gt;
+                  &lt;validity start=&quot;2015-12-15T00:00Z&quot; 
end=&quot;2016-03-31T00:00Z&quot;/&gt;
+                  &lt;retention limit=&quot;days(90)&quot; 
action=&quot;delete&quot;/&gt;
+                  &lt;import&gt;
+                      &lt;source name=&quot;mysql-db&quot; 
tableName=&quot;simple&quot;&gt;
+                          &lt;extract type=&quot;full&quot;&gt;
+                              &lt;mergepolicy&gt;snapshot&lt;/mergepolicy&gt;
+                          &lt;/extract&gt;
+                          &lt;fields&gt;
+                              &lt;includes&gt;
+                                  &lt;field&gt;id&lt;/field&gt;
+                                  &lt;field&gt;name&lt;/field&gt;
+                              &lt;/includes&gt;
+                          &lt;/fields&gt;
+                      &lt;/source&gt;
+                      &lt;arguments&gt;
+                          &lt;argument name=&quot;--split-by&quot; 
value=&quot;id&quot;/&gt;
+                          &lt;argument name=&quot;--num-mappers&quot; 
value=&quot;2&quot;/&gt;
+                      &lt;/arguments&gt;
+                  &lt;/import&gt;
+                  &lt;export&gt;
+                        &lt;target name=&quot;mysql-db&quot; 
tableName=&quot;simple_export&quot;&gt;
+                            &lt;load type=&quot;insert&quot;/&gt;
+                            &lt;fields&gt;
+                              &lt;includes&gt;
+                                &lt;field&gt;id&lt;/field&gt;
+                                &lt;field&gt;name&lt;/field&gt;
+                              &lt;/includes&gt;
+                            &lt;/fields&gt;
+                        &lt;/target&gt;
+                        &lt;arguments&gt;
+                             &lt;argument name=&quot;--update-key&quot; 
value=&quot;id&quot;/&gt;
+                        &lt;/arguments&gt;
+                    &lt;/export&gt;
+              &lt;/cluster&gt;
+          &lt;/clusters&gt;
+
+          &lt;locations&gt;
+              &lt;location type=&quot;data&quot; 
path=&quot;/user/ambari-qa/falcon/demo/primary/importfeed/${YEAR}-${MONTH}-${DAY}-${HOUR}-${MINUTE}&quot;/&gt;
+              &lt;location type=&quot;stats&quot; path=&quot;/none&quot;/&gt;
+              &lt;location type=&quot;meta&quot; path=&quot;/none&quot;/&gt;
+          &lt;/locations&gt;
+
+          &lt;ACL owner=&quot;ambari-qa&quot; group=&quot;users&quot; 
permission=&quot;0755&quot;/&gt;
+          &lt;schema location=&quot;/none&quot; provider=&quot;none&quot;/&gt;
+
+      &lt;/feed&gt;
+      
+</pre></div>
+<p></p>
+<ul>
+<li><b>Import policy</b></li></ul>The import policy uses the datasource entity specified in the &quot;source&quot; element to connect to the database. The specified tableName should exist in the source datasource.
+<p>The extraction type specifies whether to pull data from the external datasource in &quot;full&quot; every time or &quot;incrementally&quot;. The mergepolicy specifies how to organize the data on Hadoop (snapshot, or append, i.e. time-series partitions). The valid combinations are:</p>
+<ul>
+<li>[full, snapshot] - data is extracted in full and dumped into the feed instance location.</li>
+<li>[incremental, append] - data is extracted incrementally using the key specified in the <b>deltacolumn</b> and added as a partition to the feed instance location.</li>
+<li>[incremental, snapshot] - data is extracted incrementally and merged with the data already existing on Hadoop to produce one latest feed instance. <b>This feature is not supported currently.</b> The use case for this feature is to efficiently import very large dimension tables that have updates and inserts onto Hadoop, and make them available as a snapshot with the latest updates to consumers.</li></ul>
+<p>The following example defines an incremental extraction with append 
organization:</p>
+<div class="source">
+<pre>
+           &lt;import&gt;
+                &lt;source name=&quot;mysql-db&quot; 
tableName=&quot;simple&quot;&gt;
+                    &lt;extract type=&quot;incremental&quot;&gt;
+                        &lt;deltacolumn&gt;modified_time&lt;/deltacolumn&gt;
+                        &lt;mergepolicy&gt;append&lt;/mergepolicy&gt;
+                    &lt;/extract&gt;
+                    &lt;fields&gt;
+                        &lt;includes&gt;
+                            &lt;field&gt;id&lt;/field&gt;
+                            &lt;field&gt;name&lt;/field&gt;
+                        &lt;/includes&gt;
+                    &lt;/fields&gt;
+                &lt;/source&gt;
+                &lt;arguments&gt;
+                    &lt;argument name=&quot;--split-by&quot; 
value=&quot;id&quot;/&gt;
+                    &lt;argument name=&quot;--num-mappers&quot; 
value=&quot;2&quot;/&gt;
+                &lt;/arguments&gt;
+            &lt;/import&gt;
+        
+</pre></div>
+<p>The fields option enables users to control which fields get imported. By default, all fields are imported. The &quot;includes&quot; option imports only the fields specified. The &quot;excludes&quot; option imports all fields other than those specified, as sketched below.</p>
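+<p>For example, a minimal sketch of an &quot;excludes&quot; list (the column name here is a hypothetical illustration):</p>
+<div class="source">
+<pre>
+                    &lt;fields&gt;
+                        &lt;excludes&gt;
+                            &lt;field&gt;ssn&lt;/field&gt;
+                        &lt;/excludes&gt;
+                    &lt;/fields&gt;
+</pre></div>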
+<p>The arguments section enables users to pass in any extra arguments needed for fine control over the underlying implementation -- in this case, Sqoop.</p>
+<p></p>
+<ul>
+<li><b>Export policy</b></li></ul>The export policy, like import, uses the datasource for connecting to the database. The load type specifies whether to insert or only update data in the external table. The fields option behaves the same way as in the import policy. The specified tableName should exist in the external datasource.
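+<p>For instance, an export that updates existing rows keyed on &quot;id&quot; rather than inserting might look like the following sketch (assuming the feed XSD accepts load type &quot;update&quot;):</p>
+<div class="source">
+<pre>
+                  &lt;export&gt;
+                      &lt;target name=&quot;mysql-db&quot; tableName=&quot;simple_export&quot;&gt;
+                          &lt;load type=&quot;update&quot;/&gt;
+                      &lt;/target&gt;
+                      &lt;arguments&gt;
+                          &lt;argument name=&quot;--update-key&quot; value=&quot;id&quot;/&gt;
+                      &lt;/arguments&gt;
+                  &lt;/export&gt;
+</pre></div></div>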
+<div class="section">
+<h4>Operation<a name="Operation"></a></h4>
+<p>Once the Datasource and the Feed entity with import and export policies are defined, users can submit and schedule the import and export operations via the CLI and REST API as below:</p>
+<div class="source">
+<pre>
+
+    ## submit the mysql-db datasource defined in the file mysql_datasource.xml
+    falcon entity -submit -type datasource -file mysql_datasource.xml
+
+    ## submit the customer_feed specified in the customer_email_feed.xml
+    falcon entity -submit -type feed -file customer_email_feed.xml
+
+    ## schedule the customer_feed
+    falcon entity -schedule -type feed -name customer_feed
+
+   
+</pre></div>
+<p>Falcon will create the corresponding Oozie bundles, with a coordinator and workflow for the import and export operations.</p>
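+<p>The REST equivalents of the CLI commands above might look like the following sketch (the Falcon endpoint, port and user name are deployment-specific assumptions):</p>
+<div class="source">
+<pre>
+    ## submit the datasource entity over REST
+    curl -X POST -H &quot;Content-Type: text/xml&quot; --data-binary @mysql_datasource.xml \
+        &quot;http://falcon-host:15000/api/entities/submit/datasource?user.name=ambari-qa&quot;
+
+    ## schedule the feed over REST
+    curl -X POST &quot;http://falcon-host:15000/api/entities/schedule/feed/customer_feed?user.name=ambari-qa&quot;
+</pre></div></div>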

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/InstallationSteps.html
----------------------------------------------------------------------
diff --git a/content/0.11/InstallationSteps.html 
b/content/0.11/InstallationSteps.html
new file mode 100644
index 0000000..72fa2a9
--- /dev/null
+++ b/content/0.11/InstallationSteps.html
@@ -0,0 +1,153 @@
+            <div class="section">
+<h2>Building &amp; Installing Falcon<a 
name="Building__Installing_Falcon"></a></h2></div>
+<div class="section">
+<h3>Building Falcon<a name="Building_Falcon"></a></h3></div>
+<div class="section">
+<h4>Prerequisites<a name="Prerequisites"></a></h4>
+<p></p>
+<ul>
+<li>JDK 1.7/1.8</li>
+<li>Maven 3.2.x</li></ul></div>
+<div class="section">
+<h4>Step 1 - Clone the Falcon repository<a 
name="Step_1_-_Clone_the_Falcon_repository"></a></h4>
+<div class="source">
+<pre>
+$ git clone https://git-wip-us.apache.org/repos/asf/falcon.git falcon
+
+</pre></div></div>
+<div class="section">
+<h4>Step 2 - Build Falcon<a name="Step_2_-_Build_Falcon"></a></h4>
+<div class="source">
+<pre>
+$ cd falcon
+$ export MAVEN_OPTS=&quot;-Xmx1024m -XX:MaxPermSize=256m -noverify&quot;
+$ mvn clean install 
+
+
+</pre></div>
+<p>It builds and installs the package into the local repository, for use as a 
dependency in other projects locally.</p>
+<p>Optionally, -Dhadoop.version=&lt;&lt;hadoop.version&gt;&gt; can be appended to build for a specific version of Hadoop, as in the sketch below.</p>
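+<p>For example, a build pinned to a specific Hadoop version might look like this (the version number is an illustrative assumption):</p>
+<div class="source">
+<pre>
+$ mvn clean install -Dhadoop.version=2.7.3
+
+</pre></div>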
+<p><b>Note 1:</b> Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards. Falcon builds with JDK 1.7 using the -noverify option.</p>
+<p><b>Note 2:</b> To compile Falcon with addon extensions, append the additional profiles to the build command using the syntax -P&lt;&lt;profile1,profile2&gt;&gt; (see the example after this list):</p>
+<ul>
+<li>For the Hive Mirroring extension, use profile &quot;hivedr&quot;. Hive &gt;= 1.2.0 and Oozie &gt;= 4.2.0 are required.</li>
+<li>For the HDFS Snapshot mirroring extension, use profile &quot;hdfs-snapshot-mirroring&quot;. Hadoop &gt;= 2.7.0 is required.</li>
+<li>For ADF integration, use profile &quot;adf&quot;.</li></ul>
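+<p>For instance, building with the Hive Mirroring and HDFS Snapshot mirroring extensions enabled (an illustrative sketch of the syntax above):</p>
+<div class="source">
+<pre>
+$ mvn clean install -Phivedr,hdfs-snapshot-mirroring
+
+</pre></div></div>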
+<div class="section">
+<h4>Step 3 - Package and Deploy Falcon<a 
name="Step_3_-_Package_and_Deploy_Falcon"></a></h4>
+<p>Once the build successfully completes, artifacts can be packaged for deployment using the assembly plugin. The Assembly Plugin for Maven is primarily intended to allow users to aggregate the project output along with its dependencies, modules, site documentation, and other files into a single distributable archive. There are two basic ways in which you can deploy Falcon - Embedded mode (also known as Stand Alone Mode) and Distributed mode. Your next steps will vary based on the mode in which you want to deploy Falcon.</p>
+<p><b>NOTE</b>: Falcon extends Oozie (particularly its EL extensions), hence the need for Falcon to build &amp; re-package Oozie so that users of Falcon can work with the right Oozie setup. Though Oozie is packaged by Falcon, it needs to be deployed separately by the administrator and is not auto-deployed along with Falcon.</p></div>
+<div class="section">
+<h5>Embedded/Stand Alone Mode<a name="EmbeddedStand_Alone_Mode"></a></h5>
+<p>Embedded mode is useful when the Hadoop jobs and relevant data processing involve only one Hadoop cluster. In this mode there is a single Falcon server that contacts the scheduler to schedule jobs on Hadoop. All the process/feed requests like submit, schedule, suspend, kill etc. are sent to this server. To run Falcon in this mode, one should use a Falcon package built with the standalone option. You can find the instructions for Embedded mode setup <a href="./Embedded-mode.html">here</a>.</p></div>
+<div class="section">
+<h5>Distributed Mode<a name="Distributed_Mode"></a></h5>
+<p>Distributed mode is for multiple (colos) instances of Hadoop clusters, with multiple workflow schedulers to handle them. In this mode Falcon has 2 components: Prism and Server(s). Both Prism and Server(s) have their own config locations (startup and runtime properties). In this mode Prism acts as the contact point for Falcon servers. While all commands are available through Prism, only the read and instance APIs are available through a Server. You can find the instructions for Distributed mode setup <a href="./Distributed-mode.html">here</a>.</p></div>
+<div class="section">
+<h4>Preparing Oozie and Falcon packages for deployment<a 
name="Preparing_Oozie_and_Falcon_packages_for_deployment"></a></h4>
+<div class="source">
+<pre>
+$cd &lt;&lt;project home&gt;&gt;
+$src/bin/package.sh &lt;&lt;hadoop-version&gt;&gt; 
&lt;&lt;oozie-version&gt;&gt;
+
+&gt;&gt; ex. src/bin/package.sh 1.1.2 4.0.1 or src/bin/package.sh 
0.20.2-cdh3u5 4.0.1
+&gt;&gt; ex. src/bin/package.sh 2.5.0 4.0.0
+&gt;&gt; Falcon package is available in &lt;&lt;falcon 
home&gt;&gt;/target/apache-falcon-&lt;&lt;version&gt;&gt;-bin.tar.gz
+&gt;&gt; Oozie package is available in &lt;&lt;falcon 
home&gt;&gt;/target/oozie-4.0.1-distro.tar.gz
+&gt;&gt; IMPORTANT: You need to download je-5.0.73 from
+http://download.oracle.com/otn/berkeley-db/je-5.0.73.zip and extract je-5.0.73
+under the Falcon webapp directory, or provision an HBase cluster for use as the
+Falcon graphdb backend DB. Depending on the graphdb backend choice, update
+startup.properties appropriately.
+
+</pre></div>
+<p><b>NOTE:</b> If you have a separate Apache Oozie installation, you will 
need to follow some additional steps:</p>
+<ol style="list-style-type: decimal">
+<li>Once you have set up the Falcon server, copy the libraries under 
{falcon-server-dir}/oozie/libext/ to {oozie-install-dir}/libext.</li>
+<li>Modify Oozie's configuration file: copy all Falcon-related properties from 
{falcon-server-dir}/oozie/conf/oozie-site.xml to 
{oozie-install-dir}/conf/oozie-site.xml.</li>
+<li>Restart Oozie:
+<ol style="list-style-type: decimal">
+<li>cd {oozie-install-dir}</li>
+<li>sudo -u oozie ./bin/oozie-stop.sh</li>
+<li>sudo -u oozie ./bin/oozie-setup.sh prepare-war</li>
+<li>sudo -u oozie ./bin/oozie-start.sh</li></ol></li></ol></div>

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/MetricCollection.html
----------------------------------------------------------------------
diff --git a/content/0.11/MetricCollection.html 
b/content/0.11/MetricCollection.html
new file mode 100644
index 0000000..322eb91
--- /dev/null
+++ b/content/0.11/MetricCollection.html
@@ -0,0 +1,121 @@
+            <div class="section">
+<h3>Metric Collection<a name="Metric_Collection"></a></h3>
+<p>Metric Collection currently allows collecting the following metrics at the process level:</p>
+<p></p>
+<ol style="list-style-type: decimal">
+<li>Processing time the process spent in the running state, in seconds (workflow_end_time - workflow_start_time)</li>
+<li>Wait time the process spent in the waiting/ready state, in seconds (workflow_start_time - workflow_nominal_time)</li>
+<li>Number of failed instances for a process.</li></ol>
+<p>To send data to <b>Graphite</b></p>
+<p>Falcon needs to initialize the MetricNotificationService in startup.properties:</p>
+<div class="source">
+<pre>
+*.application.services= org.apache.falcon.metrics.MetricNotificationService,
+
+</pre></div>
+<p>Add the following properties for the graphiteNotificationPlugin:</p>
+<p><b>Graphite properties</b></p>
+<div class="source">
+<pre>
+   * *.falcon.graphite.hostname=localhost
+   * *.falcon.graphite.port=2003
+   * *.falcon.graphite.frequency=1
+   * *.falcon.graphite.prefix=falcon
+
+</pre></div>
+<p>The falcon.graphite.frequency value is in seconds, and all times sent to Graphite are in seconds.</p>
+<p>To send data to <b>Falcon DB</b></p>
+<p>Falcon needs the <b>ProcessInstanceInfo</b> table in the database; have a look at <a href="./FalconDatabase.html">FalconDatabase</a> to know how to create it.</p>
+<p>Add the following property to startup.properties:</p>
+<div class="source">
+<pre>
+*.monitoring.plugins=org.apache.falcon.plugin.ProcessExecutionStatsPlugin
+
+</pre></div></div>

http://git-wip-us.apache.org/repos/asf/falcon/blob/91c68bea/content/0.11/MigrationInstructions.html
----------------------------------------------------------------------
diff --git a/content/0.11/MigrationInstructions.html 
b/content/0.11/MigrationInstructions.html
new file mode 100644
index 0000000..83a7695
--- /dev/null
+++ b/content/0.11/MigrationInstructions.html
@@ -0,0 +1,113 @@
+            <div class="section">
+<h2>Migration Instructions<a name="Migration_Instructions"></a></h2></div>
+<div class="section">
+<h3>Migrate from 0.9 to 0.10<a name="Migrate_from_0.9_to_0.10"></a></h3>
+<p>FALCON-1333 (the Instance Search feature) requires Falcon to use titan-berkeleyje version 0.5.4 to support indexing. Up until version 0.9, Falcon used titan-berkeleyje-jre6 version 0.4.2. A GraphDB created by version 0.4.2 cannot be read by version 0.5.4. The solution is to migrate the GraphDB to be compatible with the Falcon 0.10 release. Please make sure that no Falcon server is running while performing the migration.</p></div>
+<div class="section">
+<h4>1. Install Falcon 0.10<a name="a1._Install_Falcon_0.10"></a></h4>
+<p>Install Falcon 0.10 by following the <a href="./InstallationSteps.html">Installation Steps</a>. Do not start the Falcon server yet. The tool to migrate the GraphDB is packaged with the 0.10 Falcon server in falcon-common-0.10.jar.</p></div>
+<div class="section">
+<h4>2. Export GraphDB to JSON file using Falcon 0.9<a 
name="a2._Export_GraphDB_to_JSON_file_using_Falcon_0.9"></a></h4>
+<p>Please run the following command to generate the JSON file.</p>
+<div class="source">
+<pre>
+ $FALCON_HOME/bin/graphdbutil.sh export &lt;&lt;java_home&gt;&gt; &lt;&lt;hadoop_home&gt;&gt; &lt;&lt;falcon_0.9_home&gt;&gt; &lt;&lt;path_to_falcon-common-0.10.jar&gt;&gt; /jsonFile/dir/
+
+</pre></div>
+<p>This command will create /jsonFile/dir/instanceMetadata.json.</p></div>
+<div class="section">
+<h4>3. Import GraphDB from JSON file using Falcon 0.10<a 
name="a3._Import_GraphDB_from_JSON_file_using_Falcon_0.10"></a></h4>
+<p>Please run the following command to import the GraphDB from the JSON file. The location of the GraphDB will be based on the property &quot;*.falcon.graph.storage.directory&quot; set in the startup.properties file.</p>
+<div class="source">
+<pre>
+  $FALCON_HOME/bin/graphdbutil.sh import &lt;&lt;java_home&gt;&gt; &lt;&lt;hadoop_home&gt;&gt; &lt;&lt;falcon_0.10_home&gt;&gt; &lt;&lt;path_to_falcon-common-0.10.jar&gt;&gt; /jsonFile/dir/
+
+</pre></div>
+<p>This command will import from /jsonFile/dir/instanceMetadata.json. Now start the Falcon 0.10 server.</p></div>
