APEXCORE-292 #resolve Adding roadmap under docs

Project: http://git-wip-us.apache.org/repos/asf/incubator-apex-site/repo
Commit: 
http://git-wip-us.apache.org/repos/asf/incubator-apex-site/commit/47565155
Tree: http://git-wip-us.apache.org/repos/asf/incubator-apex-site/tree/47565155
Diff: http://git-wip-us.apache.org/repos/asf/incubator-apex-site/diff/47565155

Branch: refs/heads/master
Commit: 475651558f149ede87b034dacdba7adc47d9f20e
Parents: e75ace0
Author: sashadt <[email protected]>
Authored: Sat Jan 2 11:33:20 2016 -0800
Committer: sashadt <[email protected]>
Committed: Sat Jan 2 11:33:20 2016 -0800

----------------------------------------------------------------------
 gulpfile.js            |  20 +++----
 roadmap.json           | 129 ++++++++++++++++++++++++++------------------
 src/md/docs.md         |   7 ++-
 src/pages/roadmap.html |  79 +++++++++++++++++++++++++++
 4 files changed, 171 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-apex-site/blob/47565155/gulpfile.js
----------------------------------------------------------------------
diff --git a/gulpfile.js b/gulpfile.js
index aa15863..59cd04b 100644
--- a/gulpfile.js
+++ b/gulpfile.js
@@ -74,7 +74,7 @@ gulp.task('html', ['md2html'], function() {
     .pipe(handlebars({ 
         nav: require('./navigation.json'),
         releases: require('./releases.json'),
-        jiras: require('./roadmap.json')
+        roadmap: require('./roadmap.json')
       }, options))
     .pipe(gulp.dest(BUILD_LOCATION))
     .on('error', function(err) {
@@ -117,10 +117,10 @@ gulp.task('default', ['less', 'html', 'copy:js', 
'copy:images']);
 gulp.task('fetch-roadmap', function(taskCb) {
 
   var projects = [
-    { name: 'APEXCORE', apiUrl: 'https://issues.apache.org/jira/rest/api/2/', 
browseUrl: 'https://issues.apache.org/jira/browse/' },
-    { name: 'MLHR', apiUrl: 'https://malhar.atlassian.net/rest/api/2/', 
browseUrl: 'https://malhar.atlassian.net/browse/' }
+    { key: 'core', name: 'APEXCORE', apiUrl: 
'https://issues.apache.org/jira/rest/api/2/', browseUrl: 
'https://issues.apache.org/jira/browse' },
+    { key: 'malhar', name: 'MLHR', apiUrl: 
'https://malhar.atlassian.net/rest/api/2/', browseUrl: 
'https://malhar.atlassian.net/browse' }
     // Replace when migration from malhar.atlassian.net to ASF 
(issues.apache.org) JIRA is complete
-    // { key: 'apex-malhar',   url: 
'https://issues.apache.org/jira/rest/api/2/', browseUrl: 
'https://issues.apache.org/jira/browse/', project: 'APEXMALHAR' },
+    // { key: 'malhar', name: 'APEXMALHAR', apiUrl: 
'https://issues.apache.org/jira/rest/api/2/', browseUrl: 
'https://issues.apache.org/jira/browse/' },
   ];  
 
 
@@ -173,7 +173,7 @@ gulp.task('fetch-roadmap', function(taskCb) {
           jql: 'project = ' + project.name + ' AND labels in (roadmap) AND 
status NOT IN ( Closed, Resolved )',
           startAt: 0,
           maxResults: 1000,
-          fields: ['summary','priority','status', 'fixVersions']
+          fields: ['summary','priority','status','fixVersions','description']
         };
 
         request.post({
@@ -259,12 +259,14 @@ gulp.task('fetch-roadmap', function(taskCb) {
 
     var fileContents = {};
 
-    // Use the project name as key and provide associated array of matching 
jiras
+    // Use the project key and provide associated arrays of matching jiras and 
versions
     projectResults.forEach(function(project) {
-      _.set(fileContents, project.name, 
+      _.set(fileContents, project.key, 
         {
-          versions: project.versions,
-          jiras: project.jiras
+          name: project.name,
+          browseUrl: project.browseUrl,
+          jiras: project.jiras,
+          versions: project.versions
         });
     });
 

http://git-wip-us.apache.org/repos/asf/incubator-apex-site/blob/47565155/roadmap.json
----------------------------------------------------------------------
diff --git a/roadmap.json b/roadmap.json
index c462577..0064035 100644
--- a/roadmap.json
+++ b/roadmap.json
@@ -1,39 +1,7 @@
 {
-  "APEXCORE": {
-    "versions": [
-      {
-        "self": "https://issues.apache.org/jira/rest/api/2/version/12333945",
-        "id": "12333945",
-        "name": "3.0.1",
-        "archived": true,
-        "released": false,
-        "projectId": 12318823
-      },
-      {
-        "self": "https://issues.apache.org/jira/rest/api/2/version/12334260",
-        "id": "12334260",
-        "name": "3.2.1",
-        "archived": false,
-        "released": false,
-        "projectId": 12318823
-      },
-      {
-        "self": "https://issues.apache.org/jira/rest/api/2/version/12333950",
-        "id": "12333950",
-        "name": "3.3.0",
-        "archived": false,
-        "released": false,
-        "projectId": 12318823
-      },
-      {
-        "self": "https://issues.apache.org/jira/rest/api/2/version/12333948",
-        "id": "12333948",
-        "name": "4.0.0",
-        "archived": false,
-        "released": false,
-        "projectId": 12318823
-      }
-    ],
+  "core": {
+    "name": "APEXCORE",
+    "browseUrl": "https://issues.apache.org/jira/browse",
     "jiras": [
       {
         "expand": "operations,editmeta,changelog,transitions,renderedFields",
@@ -42,6 +10,7 @@
         "key": "APEXCORE-3",
         "fields": {
           "summary": "Ability for an operator to populate DAG at launch time",
+          "description": "Apex should have an operator API that lets the 
operator generate DAG during launch time. This will mean the following\r\n\r\n- 
Logical DAG will have one operator. This is the operator that will generate a 
DAG underneath\r\n- Physical plan will have the DAG generated by the 
operator\r\n- Execution plan will mimic physical plan + container location 
etc.\r\n\r\nFor example lets say we have three operators in a DAG (app) 
A->B->C\r\n\r\nB during launch time generates a DAG B1->B2->B3, then the 
physical plan will be\r\n\r\nA->B1->B2->B3->C\r\n\r\nThis should work 
irrespective of number of ports, etc. A typical flattening. The operators 
inside of B (B1, B2, B3) should have properties and attributes just as any. 
Users should be able to access these at run time and compile time. B itself 
should support properties and attributes that B1, B2, B3 can inherit 
from.\r\n\r\nThis is a very critical feature as it will open up users to 
plug-in their own engines and still tak
 e up complete operability support from Apex engine.",
           "fixVersions": [
             {
              "self": "https://issues.apache.org/jira/rest/api/2/version/12333950",
@@ -80,6 +49,7 @@
         "key": "APEXCORE-10",
         "fields": {
           "summary": "Enable non-affinity of operators per node (not 
containers)",
+          "description": "The issue happens on cloud which provides virtual 
cores with software like Xen underneath. In effect if CPU intensive operators 
land up on same node we have a resource bottleneck,\r\n\r\nNeed to create an 
attribute that does the following\r\n- Operators A & B should not be on same 
node\r\n- Stram should use this attribute to try to get containers on different 
node\r\n\r\nIt is understood that the user is making an explicit choice to use 
NIC instead of stream local optimization",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -110,6 +80,7 @@
         "key": "APEXCORE-60",
         "fields": {
           "summary": "Iterative processing support",
+          "description": "We would like to support iterative processing by 
introducing cycles in the graph (known as DAG now, but no longer if we support 
iterative processing).\r\n\r\nInitial idea is as follow:\r\n{noformat}\r\n     
|----|\r\n     v    |\r\nA -> B -> C -> D\r\n^         
|\r\n|---------|\r\n{noformat} \r\n\r\nC has two separate backward streams to A 
and B.  The input ports of A and B that C connects to will have a special 
attribute on how many window IDs ahead the incoming windows should be treated 
as, and A and B will be responsible for the initial data for such input 
ports.\r\n\r\nAnother idea is to have C advance the window ID on its output 
ports and have C generate the initial data on its output ports to A and B.",
           "fixVersions": [
             {
              "self": "https://issues.apache.org/jira/rest/api/2/version/12333950",
@@ -148,6 +119,7 @@
         "key": "APEXCORE-119",
         "fields": {
           "summary": "Add Support For A New Type Of (Distributed) Operator",
+          "description": "This JIRA Proposes support for a new type of 
distributed operator. Currently when an operator is partitioned there is no 
platform supported mechanism through which partitions can talk to each other. A 
Distributed operator would have an easy to use platform supported mechanism 
through which operators in a partitioning can exchange information with each 
other. Eventually Distributed operators would support running plain old single 
threaded java code transparently across partitions.\r\n\r\nIn summary the goals 
would be to do the following:\r\n\r\n1 - provide a platform supported fault 
tolerant mechanism through which operators in a partitioning can talk to each 
other.\r\n2 - provide a platform supported way to run plain old single threaded 
java code accross all the partitions of a Distributed operator\r\n\r\nThe 
benefits of implementing this would be huge:\r\n\r\n1 - Using distributed 
operators we could support large in memory fault tolerant data structures (g
 raphs, maps, arrays) in a fault tolerant way. Like Spark's RDD's but 
better.\r\n2 - Plain old java code could be used to access and manipulate the 
data structures, without the user having the learn complex API's like with 
Spark.\r\n\r\nAn implementation proposal and presentation are coming soon.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -178,6 +150,7 @@
         "key": "APEXCORE-163",
         "fields": {
           "summary": "Dynamic application property changes",
+          "description": "Apex support modification of operator properties at 
runtime but the current implemenations has the following 
shortcomings.\r\n\r\n1. Property is not set across all partitions on the same 
window as individual partitions can be on different windows when property 
change is initiated from client resulting in inconsistency of data for those 
windows. I am being generous using the word inconsistent.\r\n2. Sometimes 
properties need to be set on more than one logical operators at the same time 
to achieve the change the user is seeking. Today they will be two separate 
changes happening on two different windows again resulting in inconsistent data 
for some windows. These would need to happen as a single transaction.\r\n3. If 
there is an operator failure before a committed checkpoint after an operator 
property is dynamically changed the operator will restart with the old property 
and the change will not be re-applied.\r\n\r\nTim and myself did some 
brainstorming and we
  have a proposal to overcome these shortcomings. The main problem in all the 
above cases is that the property changes are happening out-of-band of data flow 
and hence independent of windowing. The proposal is to bring the property 
change request into the in-band dataflow so that they are handled consistently 
with windowing and handled distributively.\r\n\r\nThe idea is to inject a 
special property change tuple containing the property changes and the 
identification information of the operator's they affect into the dataflow at 
the input operator. The tuple will be injected at window boundary after end 
window and before begin window and as this tuple flows through the DAG the 
intended operators properties will be modifed. They will all be modified 
consistently at the same window. The tuple can contain more than one property 
changes for more than one logical operators and the change will be applied 
consistently to the different logical operators at the same window. In case of 
failure t
 he replay of tuples will ensure that the property change gets reapplied at the 
correct window.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -208,6 +181,7 @@
         "key": "APEXCORE-202",
         "fields": {
           "summary": "Integration with Samoa",
+          "description": "Apache Samoa[https://samoa.incubator.apache.org/] is 
an abstraction of a collections of streaming machine learning Algorithm. By 
far, it has integration with Samza, Storm and flink, It is a good start point 
for Apex to support streaming ML.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -238,6 +212,7 @@
         "key": "APEXCORE-231",
         "fields": {
           "summary": "Ability to configure attributes dynamically",
+          "description": "The Apex engine supports many platform level 
attributes like operator memory, application window count, container jvm 
options etc. Today these can only be set at application launch time and cannot 
be changed once the application is running.\r\n\r\nThis issue is to add the 
ability to change the attributes dynamically even as the application is 
running. The mechanics of an user requesting the attribute change can be 
similar to how a user requests property change via the command line 
client.\r\n\r\nSince each attribute is different the actual backend 
implementation to affect the changes will most likely be custom handling for 
different attributes but during the implementation process  hopefully some 
common themes emerge and some amount of reuse possible.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -268,6 +243,7 @@
         "key": "APEXCORE-232",
         "fields": {
           "summary": "Ability to add new processing code to the DAG",
+          "description": "There are scenarios when new processing code needs 
to be added to an already running application. There are two 
scenarios.\r\n\r\na. A bug is discovered in an operator and an existing 
operator in the running DAG needs to be replaced. The platform supports 
shutting down and resuming an application which could be use as a first cut way 
to do this but there are a couple of drawbacks.\r\n       i. This only works 
when the input source has memory, if it doesn't the messages received during 
the time the application is down are lost.\r\n      ii. Depending on the 
complexity and state of the application it may take some time for this entire 
process and the application to get back to running state and this delay may not 
be acceptable for the downstream components that depend on the output of this 
application.\r\n\r\nb. A new operator needs to be added to the DAG to take data 
from an existing operator and do some additional processing. Today this is 
supported as long
  as the code for the operator is already in the application libraries. Often 
this will not be the case as users will not know what the operator will be 
beforehand when the application is originally launched.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -298,6 +274,7 @@
         "key": "APEXCORE-233",
         "fields": {
           "summary": "Ability to specify single instance objects in 
configuration",
+          "description": "There are scenarios where the same object instance 
needs to be specified for two attributes. Example is partitioner and stats 
listener, for partitioners that need to affect partitoning based on operator 
stats the same instance needs to be both. This is not possible to specify using 
a property file today as it will create two separate instances and can only be 
done in Java code today. The issue is to request adding this feature.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -328,6 +305,7 @@
         "key": "APEXCORE-234",
         "fields": {
           "summary": "Investigate other ways to specify properties in property 
files",
+          "description": "The current property file specification follows the 
hadoop configuration file format and this has led to some drawbacks. \r\n    a. 
The names for the properties and attributes are verbose in the configuration 
file. \r\n    b. When there are nested properties in operators the syntax 
deviates from the bean specification because it introduces some specific 
keywords in the specification like .prop and ,attr.\r\n\r\nThere will already 
be some changes afoot based on the following\r\n   a. When adding ability to 
specify single instance attributes 
(https://malhar.atlassian.net/browse/APEXCORE-233) implementing it in the 
current syntax may not be possible or lead to very unwieldy syntax.\r\n   b. 
There are also other ideas such as one from David to have the ability to 
specify global application level attributes which possible require rethinking 
the current syntax.\r\n\r\nUsers have also asked for an easier and more 
consistent way to specify these properties.  This i
 ssue is to track the ideas and progress of these changes.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -358,6 +336,7 @@
         "key": "APEXCORE-235",
         "fields": {
           "summary": "Explicit support for batch processing",
+          "description": "Apex can be used for real-time and batch processing 
as it stands, but there are some aspects of batch processing that can be better 
supported through explicit constructs. This ticket can serve as umbrella for 
various features.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -388,6 +367,7 @@
         "key": "APEXCORE-289",
         "fields": {
           "summary": "Encrypted Streams in Apex DAG",
+          "description": "We should support encrypted streams in a DAG for 
Apex.\r\nBasically there will be 2 ways user can configure the streams for 
encryption:\r\n1) App wide attributes- Using which all the stream in the DAG 
will have encrypted channel.\r\n2) Stream based attribute - Using this user can 
set a certain stream to flow over encrypted channel.\r\n\r\nEncrypted for the 
streams should done at Network/Buffer Server levels.",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -418,6 +398,7 @@
         "key": "APEXCORE-293",
         "fields": {
           "summary": "Add core and malhar documentation to project web site",
+          "description": null,
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -448,6 +429,7 @@
         "key": "APEXCORE-295",
         "fields": {
           "summary": "Running a Storm topology on Apex.",
+          "description": "Flink streaming is compatible with Apache Storm 
interfaces and therefore allows reusing code that was implemented for 
Storm.\r\nDetails can be found 
here.\r\nhttps://ci.apache.org/projects/flink/flink-docs-master/apis/storm_compatibility.html\r\nThis
 jira item can contain tasks for providing similar support in Apex",
           "fixVersions": [],
           "priority": {
            "self": "https://issues.apache.org/jira/rest/api/2/priority/3",
@@ -471,43 +453,45 @@
           }
         }
       }
-    ]
-  },
-  "MLHR": {
+    ],
     "versions": [
       {
-        "self": "https://malhar.atlassian.net/rest/api/2/version/11604",
-        "id": "11604",
+        "self": "https://issues.apache.org/jira/rest/api/2/version/12333945",
+        "id": "12333945",
         "name": "3.0.1",
         "archived": true,
         "released": false,
-        "projectId": 10600
+        "projectId": 12318823
       },
       {
-        "self": "https://malhar.atlassian.net/rest/api/2/version/12001",
-        "id": "12001",
+        "self": "https://issues.apache.org/jira/rest/api/2/version/12334260",
+        "id": "12334260",
         "name": "3.2.1",
         "archived": false,
         "released": false,
-        "projectId": 10600
+        "projectId": 12318823
       },
       {
-        "self": "https://malhar.atlassian.net/rest/api/2/version/12000",
-        "id": "12000",
+        "self": "https://issues.apache.org/jira/rest/api/2/version/12333950",
+        "id": "12333950",
         "name": "3.3.0",
         "archived": false,
         "released": false,
-        "projectId": 10600
+        "projectId": 12318823
       },
       {
-        "self": "https://malhar.atlassian.net/rest/api/2/version/11703",
-        "id": "11703",
+        "self": "https://issues.apache.org/jira/rest/api/2/version/12333948",
+        "id": "12333948",
         "name": "4.0.0",
         "archived": false,
         "released": false,
-        "projectId": 10600
+        "projectId": 12318823
       }
-    ],
+    ]
+  },
+  "malhar": {
+    "name": "MLHR",
+    "browseUrl": "https://malhar.atlassian.net/browse",
     "jiras": [
       {
         "expand": 
"operations,versionedRepresentations,editmeta,changelog,transitions,renderedFields",
@@ -516,6 +500,7 @@
         "key": "MLHR-1720",
         "fields": {
           "summary": "Development of Inner Join Operator",
+          "description": null,
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/3",
@@ -546,6 +531,7 @@
         "key": "MLHR-1811",
         "fields": {
           "summary": "Add Non-Equality Join Condition",
+          "description": "Add new condition for non-equality join predicate 
(for example, user.zipcode != authzn.zipcode)",
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/3",
@@ -576,6 +562,7 @@
         "key": "MLHR-1818",
         "fields": {
           "summary": "Create a Calcite operator to enable SQL commands to be 
run",
+          "description": "Once we have ability to code generate, we should 
take a look at integrating Calcite into Apex. The operator that enables 
populate DAG, should use Calcite to generate the DAG, given a SQL query.",
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/3",
@@ -606,6 +593,7 @@
         "key": "MLHR-1843",
         "fields": {
           "summary": "Split Malhar Library and Malhar Contrib package into 
baby packages",
+          "description": "[~andyp] I am assigning this to you cause you are 
the one who first said it. So either you lead it or find a willing lead to get 
this task to completion.\r\n\r\nThe problem with contrib and library modules of 
malhar is that a ton of dependencies are prescribed as optional. The motive 
behind it was that the users of these libraries are given an opportunity to 
keep the size of the dependency-included packages to bare minimum. It  comes at 
a cost that the dependency now has to be manually figured out. This is a 
complete misuse of the optional dependency, IMO. It defeats the purpose of 
maven having dependency management as one of the biggest features of 
it.\r\n\r\nSo keep things sane - the proposed compromise is that we start 
creating smaller discreet packages for discrete technologies. ",
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/2",
@@ -636,6 +624,7 @@
         "key": "MLHR-1873",
         "fields": {
           "summary": "Create a fault-tolerant/scalable cache component backed 
by a persistent store",
+          "description": "We need a Scalable Cache component in Malhar. 
Following are some of the key features of the cache\r\n\r\n1. Cache has limited 
size and is backed by a persistent store.\r\n2. When there is a cache miss, the 
data is loaded from backup store.\r\n3. To minimize misses, a range of keys can 
be loaded.\r\n4. Ability to purge key/values\r\n5. Writing to the backup store 
should be optimized.\r\n6. It should provide support for fault-tolerance.",
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/3",
@@ -666,6 +655,7 @@
         "key": "MLHR-1904",
         "fields": {
           "summary": "Rewrite kafka input operator to use 0.9.0 new consumer",
+          "description": null,
           "fixVersions": [
             {
              "self": "https://malhar.atlassian.net/rest/api/2/version/12000",
@@ -704,6 +694,7 @@
         "key": "MLHR-1938",
         "fields": {
           "summary": "Operator checkpointing in distributed in-memory store",
+          "description": "Currently Apex engine provides operator 
checkpointing in Hdfs ( with Hdfs backed StorageAgents i.e. FSStorageAgent & 
AsyncFSStorageAgent )\r\nAs operator check-pointing is critical functionality 
of Apex streaming platform to ensure fault tolerant behavior, platform should 
also provide alternate StorageAgents which will work seamlessly with large 
applications that requires Exactly once semantics.\r\nHDFS read/write latency 
is limited and doesn't improve beyond certain point because of disk io & 
staging writes. Having alternate strategy to this check-pointing in fault 
tolerant distributed in-memory grid would ensure application stability and 
performance is not impacted by checkpointing\r\n\r\n*This feature will add 
below functionalities*\r\n* A KeyValue store interface which is used by 
In-memory checkpointing storage agent.\r\n* Abstract implementation of KeyValue 
storage agent which can be configured with concrete implementation of KeyValue 
store for checkpo
 inting.\r\n* Concrete implementation of In memory storage agent for Apache 
Geode\r\n\r\n*This feature depends on below APEX core feature* 
\r\nhttps://issues.apache.org/jira/browse/APEXCORE-283\r\n* Interface for 
storage agent to provide application id\r\n* Stram client changes to pass 
applicationId ",
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/4",
@@ -734,6 +725,7 @@
         "key": "MLHR-1939",
         "fields": {
           "summary": "Stream API",
+          "description": null,
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/2",
@@ -764,6 +756,7 @@
         "key": "MLHR-1942",
         "fields": {
           "summary": "Apex Operator for Apache Geode.",
+          "description": "We would like to contribute the Apache 
Geode(http://geode.incubator.apache.org/) Operator support for Apex.\r\nIt will 
basically be implementation for writing to geode region.\r\nThis is in 
continuation with the Operator checkpointing alternative under review 
(MLHR-1938)",
           "fixVersions": [],
           "priority": {
            "self": "https://malhar.atlassian.net/rest/api/2/priority/4",
@@ -787,6 +780,40 @@
           }
         }
       }
+    ],
+    "versions": [
+      {
+        "self": "https://malhar.atlassian.net/rest/api/2/version/11604",
+        "id": "11604",
+        "name": "3.0.1",
+        "archived": true,
+        "released": false,
+        "projectId": 10600
+      },
+      {
+        "self": "https://malhar.atlassian.net/rest/api/2/version/12001",
+        "id": "12001",
+        "name": "3.2.1",
+        "archived": false,
+        "released": false,
+        "projectId": 10600
+      },
+      {
+        "self": "https://malhar.atlassian.net/rest/api/2/version/12000",
+        "id": "12000",
+        "name": "3.3.0",
+        "archived": false,
+        "released": false,
+        "projectId": 10600
+      },
+      {
+        "self": "https://malhar.atlassian.net/rest/api/2/version/11703",
+        "id": "11703",
+        "name": "4.0.0",
+        "archived": false,
+        "released": false,
+        "projectId": 10600
+      }
     ]
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-apex-site/blob/47565155/src/md/docs.md
----------------------------------------------------------------------
diff --git a/src/md/docs.md b/src/md/docs.md
index ab266d1..f3eb432 100644
--- a/src/md/docs.md
+++ b/src/md/docs.md
@@ -1,9 +1,5 @@
 # Documentation
 
-New documentation will be coming soon!
-
-Here are a few resources to get started with Apex:
-
 ### Getting Started
 
 - [Building Your First Apache Apex Application 
(video)](https://youtu.be/LwRWBudOjg4)
@@ -20,3 +16,6 @@ Here are a few resources to get started with Apex:
 - [JavaDoc](https://www.datatorrent.com/docs/apidocs/)
 - [Benchmarks compare between 2.0 and 
3.0](https://www.datatorrent.com/blog-apex-performance-benchmark/)
 
+### Roadmap
+
+- [Apex Roadmap](roadmap.html) comprises key features planned for the future 
releases

http://git-wip-us.apache.org/repos/asf/incubator-apex-site/blob/47565155/src/pages/roadmap.html
----------------------------------------------------------------------
diff --git a/src/pages/roadmap.html b/src/pages/roadmap.html
new file mode 100644
index 0000000..fe704ee
--- /dev/null
+++ b/src/pages/roadmap.html
@@ -0,0 +1,79 @@
+{{> header}}
+
+<div class="container">
+  
+  <h1>Apex Roadmap</h1>
+  
+  <!-- APEX CORE ROADMAP -->
+  {{#if roadmap.core.jiras.length}}
+  <h2>Core</h2>
+  <table class="table table-bordered table-striped">
+    <thead>
+      <tr>
+        <th scope="col">JIRA</th>
+        <th scope="col">Summary</th>
+        <th scope="col">Version</th>
+      </tr>
+    </thead>
+    <tbody>
+      {{#each roadmap.core.jiras as |jira jIndex|}}
+      <tr>
+        <td>
+          <a target="_blank" href="{{ ../roadmap.core.browseUrl }}/{{ jira.key 
}}">{{ jira.key }}</a>
+        </td>
+        <td title="{{ jira.fields.description }}">
+          {{jira.fields.summary}}
+        </td>
+        <td>
+    
+          {{#each jira.fields.fixVersions as |version vIndex|}}
+
+            <a target="_blank" href="{{ ../../roadmap.core.browseUrl }}/{{ 
../../roadmap.core.name }}/fixforversion/{{ version.id }}">{{ version.name 
}}</a>&nbsp;
+
+          {{/each}}
+
+        </td>
+      </tr>
+      {{/each}}
+    </tbody>
+  </table>
+  {{/if}}
+
+  <!-- APEX MALHAR ROADMAP -->
+  {{#if roadmap.malhar.jiras.length}}
+  <h2>Malhar</h2>
+  <table class="table table-bordered table-striped">
+    <thead>
+      <tr>
+        <th scope="col">JIRA</th>
+        <th scope="col">Summary</th>
+        <th scope="col">Version</th>
+      </tr>
+    </thead>
+    <tbody>
+      {{#each roadmap.malhar.jiras as |jira jIndex|}}
+      <tr>
+        <td>
+          <a target="_blank" href="{{ ../roadmap.malhar.browseUrl }}/{{ 
jira.key }}">{{ jira.key }}</a>
+        </td>
+        <td title="{{ jira.fields.description }}">
+          {{jira.fields.summary}}
+        </td>
+        <td>
+    
+          {{#each jira.fields.fixVersions as |version vIndex|}}
+
+            <a target="_blank" href="{{ ../../roadmap.malhar.browseUrl }}/{{ 
../../roadmap.malhar.name }}/fixforversion/{{ version.id }}">{{ version.name 
}}</a>&nbsp;
+
+          {{/each}}
+
+        </td>
+      </tr>
+      {{/each}}
+    </tbody>
+  </table>
+  {{/if}}
+
+</div>
+
+{{> footer}}
\ No newline at end of file

Reply via email to