http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/css/group.css
----------------------------------------------------------------------
diff --git a/content/css/group.css b/content/css/group.css
deleted file mode 100644
index b03d722..0000000
--- a/content/css/group.css
+++ /dev/null
@@ -1,215 +0,0 @@
-/* Space out content a bit */
-html {
-    overflow: scroll;
-}
-
-body {
-  padding-top: 20px;
-  padding-bottom: 20px;
-}
-
-/* Everything but the jumbotron gets side spacing for mobile first views */
-.header,
-.footer {
-  padding-left: 15px;
-  padding-right: 15px;
-}
-
-/* Custom page header */
-.header {
-  border-bottom: 1px solid #e5e5e5;
-}
-
-/* Custom page footer */
-.footer {
-  padding-top: 19px;
-  color: #777;
-  border-top: 1px solid #e5e5e5;
-  text-align: center;
-  font-size: 0.8em;
-  clear: both;
-}
-.footer p {
-  margin: 0.1em 0;
-}
-.footer a:link, .footer a:visited {
-  color: #777;
-}
-
-/* Customize container */
-@media (min-width: 768px) {
-  .container {
-    max-width: 767px;
-  }
-}
-.container-narrow > hr {
-  margin: 30px 0;
-}
-
-/* Jumbotron */
-.jumbotron {
-  text-align: center;
-  border-bottom: 1px solid #e5e5e5;
-}
-.jumbotron p {
-    font-size: 15px;
-}
-.jumbotron {
-    padding-top: 18px;
-    padding-bottom: 10px;
-}
-
-/* Group logo */
-#logo {
-    height: 35px;
-    margin: 2px 0 8px 0;
-}
-
-/* Responsive: Portrait tablets and up */
-@media screen and (min-width: 768px) {
-  /* Remove the padding we set earlier */
-  .header,
-  .footer {
-    clear: both;
-    padding-left: 0;
-    padding-right: 0;
-  }
-  /* Space out the masthead */
-  .header {
-    margin-bottom: 30px;
-  }
-  /* Remove the bottom border on the jumbotron for visual effect */
-  .jumbotron {
-    border-bottom: 0;
-  }
-}
-
-/* Slightly better-looking header on mobile. */
-@media screen and (max-width: 767px) {
-    .nav-pills {
-        display: block;
-        margin: 0 auto 1em;
-    }
-    #logo {
-        display: block;
-        clear: both;
-        margin: 1em auto;
-    }
-}
-
-/* Photo */
-.inset-image {
-    width: 60%;
-    margin: 0 auto;
-}
-
-/* People list */
-
-dl.people dl {
-  width: 100%;
-  clear: both;
-  overflow: hidden; 
-  margin: 0;
-}
-
-dl.people dt {
-  clear: both;
-  float: left;
-  width: 15%;
-  margin: 0;
-}
-
-dl.people dd {
-  width: 85%; 
-  min-height: 6.5em;
-  margin-bottom: 1em;
-}
-
-
-
-/* Front page news. */
-ul.news .date {
-    color: #999;
-    font-weight: bold;
-    display: block;
-}
-ul.news > li {
-    margin-top: 1em;
-}
-ul.news .shortnews .date {
-    float: left;
-    margin-right: 1em;
-}
-ul.news .bloglink a {
-    font-size: 1.4em;
-}
-
-.illustration {
-    float: right;
-    margin: 0 0 1em 1em;
-}
-
-ul.double-col {
-  -webkit-columns: 2;
-  -moz-columns: 2;
-  columns: 2;
-  overflow: hidden;
-  width: 800px;
-}
-
-div.bio {
-    margin: 0 0 1em 1em;
-}
-
-ul.double-col li {
-    clear: both;
-    height: auto;
-    display: inline;
-    vertical-align: middle;
-    width: 100%;
-    margin: .5rem 0rem;
-    float: left;
-}
-
-.col-md-8 {
-}
-
-.col-md-4 {
-}
-
-table tr:nth-child(odd) {
-    background-color: #FFFFFF;
-}
-
-table tr:nth-child(even) {
-    background-color: #F5F5F5;
-}
-
-table tr:first-child th {
-  border-top-width: 1px;
-}
-
-table tr:last-child td {
-  border-bottom-width: 1px;
-}
-
-table tr td:first-child, table tr th:first-child {
-  border-left-width: 1px;
-}
-
-table tr td:last-child, table tr th:last-child {
-  border-right-width: 1px;
-}
-table, th {
-    background-color: #F5F5F5;
-    border-color: lightgrey;
-}
-
-th, td {
-    border: 0.75px solid grey;
-    padding: 7px;
-    line-height: 24px;
-    border-color: lightgrey;
-}

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/css/print.css
----------------------------------------------------------------------
diff --git a/content/css/print.css b/content/css/print.css
deleted file mode 100644
index e95929e..0000000
--- a/content/css/print.css
+++ /dev/null
@@ -1,36 +0,0 @@
-.container {
-    -moz-column-count: 2;
-    -webkit-column-count: 2;
-    column-count: 2;
-
-    font-size: 0.8em;
-}
-
-.header {
-    display: none;
-}
-
-h2 {
-    font-size: 1.3em;
-    margin: 0;
-}
-h3 {
-    font-size: 1.2em;
-    margin-top: 0;
-}
-
-.noprint {
-    display: none;
-}
-
-body {
-    margin: -2em 0 0 0;
-}
-
-/* Disable Bootstrap's link display. */
-@media print {
-    a:link:after,
-    a:visited:after {
-        content: "" !important;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/documentation/index.html
----------------------------------------------------------------------
diff --git a/content/documentation/index.html b/content/documentation/index.html
deleted file mode 100644
index 1da8b79..0000000
--- a/content/documentation/index.html
+++ /dev/null
@@ -1,108 +0,0 @@
-<!DOCTYPE html>
-<html>
-    <head>
-        <meta charset="utf-8">
-        <title>The Apache Crail (Incubating) Project: Documentation</title>
-        <meta name="viewport" content="width=device-width, initial-scale=1.0">
-        <link href="//crail.incubator.apache.org/css/bootstrap.min.css" 
rel="stylesheet">
-        <link href="//crail.incubator.apache.org/css/group.css" 
rel="stylesheet">
-        <link rel="alternate" type="application/atom+xml" title="Atom"
-            href="//crail.incubator.apache.org/blog/blog.xml">
-        
-        <meta property="og:image" 
content="//crail.incubator.apache.org/img/blog/preview/documentation-summary.png"
 />
-        <meta property="og:image:secure_url" 
content="//crail.incubator.apache.org/img/blog/preview/documentation-summary.png"
 />
-    </head>
-
-    <body>
-        <div class="container">
-          <div class="header">
-            <ul class="nav nav-pills pull-right">
-              
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/">
-                    Home
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/overview/">
-                    Overview
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/download/">
-                    Downloads
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/blog/">
-                    Blog
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/community/">
-                    Community
-                  </a>
-                </li>
-              
-                
-                <li class="active">
-                  <a href="//crail.incubator.apache.org/documentation/">
-                    Documentation
-                  </a>
-                </li>
-              
-            </ul>
-            <a href="//crail.incubator.apache.org/">
-                <img src="//crail.incubator.apache.org/img/crail_logo.png"
-                    srcset="//crail.incubator.apache.org/img/crail_logo.png"
-                    alt="Crail" id="logo">
-            </a>
-          </div>
-
-          
-          
-          <h2>Documentation</h2>   
-          
-
-          <p>The Read the Docs (RTD) documentation is available at: <a 
href="https://incubator-crail.readthedocs.io/en/latest/">https://incubator-crail.readthedocs.io/en/latest/</a></p>
-
-<h2 id="presentations">Presentations</h2>
-<ul>
-  <li><a href="//crail.incubator.apache.org/files/crail-project-web.pdf">Data 
Processing at the Speed of 100 Gbps using Apache Crail (Incubating)</a>, ETH 
Zurich, September 2018.</li>
-  <li><a 
href="https://databricks.com/session/serverless-machine-learning-on-modern-hardware-using-apache-spark";>Serverless
 Machine Learning on Modern Hardware Using Apache Spark</a>, Spark AI Summit, 
2018.</li>
-  <li><a 
href="https://dataworkssummit.com/san-jose-2018/session/data-processing-at-the-speed-of-100-gbpsapache-crail-incubating/";>Data
 processing at the speed of 100 Gbps@Apache Crail (Incubating)</a>, DataWorks 
Summit, 2018.</li>
-  <li><a 
href="https://databricks.com/session/running-apache-spark-on-a-high-performance-cluster-using-rdma-and-nvme-flash";>Running
 Apache Spark on a High-Performance Cluster Using RDMA and NVMe Flash</a>, 
Spark Summit 2017.</li>
-</ul>
-
-<h2 id="papers">Papers</h2>
-
-<ul>
-  <li>Crail: A High-Performance I/O Architecture for Distributed Data 
Processing, In the IEEE Bulletin of the Technical Committee on Data 
Engineering, Special Issue on Distributed Data Management with RDMA, Volume 40, 
pages 40-52, March 2017.</li>
-</ul>
-
-
-
-        <br>
-       <br> 
-          <div class="footer">
-            <p>Apache Crail is an effort undergoing <a 
href="https://incubator.apache.org/";>incubation</a> at <a 
href="https://www.apache.org/";>The Apache Software Foundation (ASF)</a>, 
sponsored by the Apache Incubator PMC. Incubation is required of all newly 
accepted projects until a further review indicates that the infrastructure, 
communications, and decision making process have stabilized in a manner 
consistent with other successful ASF projects. While incubation status is not 
necessarily a reflection of the completeness or stability of the code, it does 
indicate that the project has yet to be fully endorsed by the ASF.
-            </p>
-          </div>
-
-        </div> <!-- /container -->
-
-        <!-- Support retina images. -->
-        <script type="text/javascript"
-            src="//crail.incubator.apache.org/js/srcset-polyfill.js"></script>
-    </body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/download/index.html
----------------------------------------------------------------------
diff --git a/content/download/index.html b/content/download/index.html
deleted file mode 100644
index 220fac2..0000000
--- a/content/download/index.html
+++ /dev/null
@@ -1,126 +0,0 @@
-<!DOCTYPE html>
-<html>
-    <head>
-        <meta charset="utf-8">
-        <title>The Apache Crail (Incubating) Project: Downloads</title>
-        <meta name="viewport" content="width=device-width, initial-scale=1.0">
-        <link href="//crail.incubator.apache.org/css/bootstrap.min.css" 
rel="stylesheet">
-        <link href="//crail.incubator.apache.org/css/group.css" 
rel="stylesheet">
-        <link rel="alternate" type="application/atom+xml" title="Atom"
-            href="//crail.incubator.apache.org/blog/blog.xml">
-        
-        <meta property="og:image" 
content="//crail.incubator.apache.org/img/blog/preview/download-summary.png" />
-        <meta property="og:image:secure_url" 
content="//crail.incubator.apache.org/img/blog/preview/download-summary.png" />
-    </head>
-
-    <body>
-        <div class="container">
-          <div class="header">
-            <ul class="nav nav-pills pull-right">
-              
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/">
-                    Home
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/overview/">
-                    Overview
-                  </a>
-                </li>
-              
-                
-                <li class="active">
-                  <a href="//crail.incubator.apache.org/download/">
-                    Downloads
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/blog/">
-                    Blog
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/community/">
-                    Community
-                  </a>
-                </li>
-              
-                
-                <li >
-                  <a href="//crail.incubator.apache.org/documentation/">
-                    Documentation
-                  </a>
-                </li>
-              
-            </ul>
-            <a href="//crail.incubator.apache.org/">
-                <img src="//crail.incubator.apache.org/img/crail_logo.png"
-                    srcset="//crail.incubator.apache.org/img/crail_logo.png"
-                    alt="Crail" id="logo">
-            </a>
-          </div>
-
-          
-          
-          <h2>Downloads</h2>   
-          
-
-          <h3 id="source-release">Source Release</h3>
-
-<p>Apache Crail 1.0-incubating</p>
-
-<table>
-  <tbody>
-    <tr>
-      <td><a 
href="https://www.apache.org/dyn/closer.lua/incubator/crail/1.0-incubating/apache-crail-1.0-incubating-src.zip">zip</a></td>
-      <td><a 
href="https://www.apache.org/dist/incubator/crail/1.0-incubating/apache-crail-1.0-incubating-src.zip.sha256">sha256</a></td>
-      <td><a 
href="https://www.apache.org/dist/incubator/crail/1.0-incubating/apache-crail-1.0-incubating-src.zip.asc">signature</a></td>
-    </tr>
-    <tr>
-      <td><a 
href="https://www.apache.org/dyn/closer.lua/incubator/crail/1.0-incubating/apache-crail-1.0-incubating-src.tar.gz">tar.gz</a></td>
-      <td><a 
href="https://www.apache.org/dist/incubator/crail/1.0-incubating/apache-crail-1.0-incubating-src.tar.gz.sha256">sha256</a></td>
-      <td><a 
href="https://www.apache.org/dist/incubator/crail/1.0-incubating/apache-crail-1.0-incubating-src.tar.gz.asc">signature</a></td>
-    </tr>
-  </tbody>
-</table>
-
-<h3 id="release-notes">Release Notes</h3>
-
-<p>The latest release notes can be found <a 
href="https://github.com/apache/incubator-crail/blob/master/HISTORY.md">here</a>.</p>
-
-<h3 id="previous-releases">Previous Releases</h3>
-
-<p>Previous releases of Apache Crail can be found in the <a 
href="http://archive.apache.org/dist/incubator/crail/";>archive</a>.</p>
-
-<h3 id="verifying-a-release">Verifying a Release</h3>
-
-<p>You can find instructions for checking hashes and signatures on the <a 
href="http://www.apache.org/info/verification.html">Verifying Apache Software 
Foundation Releases</a> page. The committers’ PGP signatures can be found <a 
href="https://www.apache.org/dist/incubator/crail/KEYS">here</a>.</p>
-
-<p>For fast downloads, current source distributions are hosted on mirror 
servers; older source distributions are in the archive.
-For security, hash and signature files are always hosted at Apache.</p>
-
-
-
-        <br>
-       <br> 
-          <div class="footer">
-            <p>Apache Crail is an effort undergoing <a 
href="https://incubator.apache.org/";>incubation</a> at <a 
href="https://www.apache.org/";>The Apache Software Foundation (ASF)</a>, 
sponsored by the Apache Incubator PMC. Incubation is required of all newly 
accepted projects until a further review indicates that the infrastructure, 
communications, and decision making process have stabilized in a manner 
consistent with other successful ASF projects. While incubation status is not 
necessarily a reflection of the completeness or stability of the code, it does 
indicate that the project has yet to be fully endorsed by the ASF.
-            </p>
-          </div>
-
-        </div> <!-- /container -->
-
-        <!-- Support retina images. -->
-        <script type="text/javascript"
-            src="//crail.incubator.apache.org/js/srcset-polyfill.js"></script>
-    </body>
-</html>

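As an editorial aside on the verification steps described in the page above:
the hash check amounts to recomputing a SHA-256 digest over the downloaded
archive and comparing it with the published .sha256 file. A minimal Java
sketch (file names below are placeholders for whichever artifact was
downloaded; the authoritative instructions remain the linked Apache
verification page):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;

    public class VerifyDownload {
        // Hex-encode a digest for comparison with the published .sha256 text.
        static String toHex(byte[] digest) {
            StringBuilder sb = new StringBuilder();
            for (byte b : digest) sb.append(String.format("%02x", b));
            return sb.toString();
        }

        public static void main(String[] args) throws Exception {
            // Placeholder paths: the downloaded archive and its checksum file.
            Path archive = Path.of("apache-crail-1.0-incubating-src.tar.gz");
            Path checksum = Path.of("apache-crail-1.0-incubating-src.tar.gz.sha256");

            byte[] data = Files.readAllBytes(archive);
            String actual = toHex(MessageDigest.getInstance("SHA-256").digest(data));
            // A .sha256 file may contain "<hex> <filename>"; keep only the hex part.
            String expected = Files.readString(checksum).trim().split("\\s+")[0];

            System.out.println(actual.equalsIgnoreCase(expected)
                    ? "SHA-256 matches" : "SHA-256 MISMATCH - do not use this archive");
        }
    }

Checking the .asc signature additionally requires importing the KEYS file into
a PGP implementation such as GnuPG, as the verification page describes.
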
http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/favicon.png
----------------------------------------------------------------------
diff --git a/content/favicon.png b/content/favicon.png
deleted file mode 100644
index 1ca0ce5..0000000
Binary files a/content/favicon.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/feed.xml
----------------------------------------------------------------------
diff --git a/content/feed.xml b/content/feed.xml
index 8ca9533..b12bbc8 100644
--- a/content/feed.xml
+++ b/content/feed.xml
@@ -1,556 +1 @@
-<?xml version="1.0" encoding="utf-8"?><feed 
xmlns="http://www.w3.org/2005/Atom"; ><generator uri="https://jekyllrb.com/"; 
version="3.7.0">Jekyll</generator><link 
href="http://crail.incubator.apache.org//feed.xml"; rel="self" 
type="application/atom+xml" /><link href="http://crail.incubator.apache.org//"; 
rel="alternate" type="text/html" 
/><updated>2018-10-22T15:30:53+02:00</updated><id>http://crail.incubator.apache.org//</id><title
 type="html">The Apache Crail (Incubating) Project</title><entry><title 
type="html">Sql P1 News</title><link 
href="http://crail.incubator.apache.org//blog/2018/08/sql-p1-news.html"; 
rel="alternate" type="text/html" title="Sql P1 News" 
/><published>2018-08-09T00:00:00+02:00</published><updated>2018-08-09T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2018/08/sql-p1-news</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2018/08/sql-p1-news.html";>&lt;p&gt;A
 new blog &lt;a href=&quot;//crail.incubator.apache.org/blog/201
 8/08/sql-p1.html&quot;&gt;post&lt;/a&gt; discussing file formats performance 
is now online&lt;/p&gt;</content><author><name></name></author><category 
term="news" /><summary type="html">A new blog post discussing file formats 
performance is now online</summary></entry><entry><title type="html">SQL 
Performance: Part 1 - Input File Formats</title><link 
href="http://crail.incubator.apache.org//blog/2018/08/sql-p1.html"; 
rel="alternate" type="text/html" title="SQL Performance: Part 1 - Input File 
Formats" 
/><published>2018-08-08T00:00:00+02:00</published><updated>2018-08-08T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2018/08/sql-p1</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2018/08/sql-p1.html";>&lt;div 
style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-This is the first user blog post in a multi-part series where we will focus on 
relational data processing performance (e.g., SQL) in the presence of 
high-performance network and storage devices - the kind of devices that Crail 
targets. Relational data processing is one of the most popular and versatile 
workloads people run in the cloud. The general idea is that data is stored in 
tables with a schema, and is processed using a domain-specific language like 
SQL. Examples of some popular systems that support such relational data 
analytics in the cloud are &lt;a 
href=&quot;https://spark.apache.org/sql/&quot;&gt;Apache Spark/SQL&lt;/a&gt;, 
&lt;a href=&quot;https://hive.apache.org/&quot;&gt;Apache Hive&lt;/a&gt;, &lt;a 
href=&quot;https://impala.apache.org/&quot;&gt;Apache Impala&lt;/a&gt;, etc. In 
this post, we discuss the important first step in relational data processing, 
which is the reading of input data tables.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;h3 id=&quot;hardware-and-software-configuration&quot;&gt;Hardware and 
Software Configuration&lt;/h3&gt;
-
-&lt;p&gt;The specific cluster configuration used for the experiments in this 
blog:&lt;/p&gt;
-
-&lt;ul&gt;
-  &lt;li&gt;Cluster
-    &lt;ul&gt;
-      &lt;li&gt;4 compute + 1 management node x86_64 cluster&lt;/li&gt;
-    &lt;/ul&gt;
-  &lt;/li&gt;
-  &lt;li&gt;Node configuration
-    &lt;ul&gt;
-      &lt;li&gt;CPU: 2 x Intel(R) Xeon(R) CPU E5-2690 0 @ 2.90GHz&lt;/li&gt;
-      &lt;li&gt;DRAM: 256 GB DDR3&lt;/li&gt;
-      &lt;li&gt;Network: 1x100Gbit/s Mellanox ConnectX-5&lt;/li&gt;
-    &lt;/ul&gt;
-  &lt;/li&gt;
-  &lt;li&gt;Software
-    &lt;ul&gt;
-      &lt;li&gt;Ubuntu 16.04.3 LTS (Xenial Xerus) with Linux kernel version 
4.10.0-33-generic&lt;/li&gt;
-      &lt;li&gt;Apache HDFS (2.7.3)&lt;/li&gt;
-      &lt;li&gt;Apache Parquet (1.8), Apache ORC (1.4), Apache Arrow (0.8), 
Apache Avro (1.4)&lt;/li&gt;
-      &lt;li&gt;&lt;a 
href=&quot;https://github.com/apache/incubator-crail/&quot;&gt;Apache Crail 
(incubating) with NVMeF support&lt;/a&gt;, commit 
64e635e5ce9411041bf47fac5d7fadcb83a84355 (since then Crail has a stable source 
release v1.0 with a newer NVMeF code-base)&lt;/li&gt;
-    &lt;/ul&gt;
-  &lt;/li&gt;
-&lt;/ul&gt;
-
-&lt;h3 id=&quot;overview&quot;&gt;Overview&lt;/h3&gt;
-
-&lt;p&gt;In a typical cloud-based relational data processing setup, the input 
data is stored on an external data storage solution like HDFS or AWS S3. Data 
tables and their associated schema are converted into a storage-friendly format 
for optimal performance. Examples of some popular and familiar file formats are 
&lt;a href=&quot;https://parquet.apache.org/&quot;&gt;Apache Parquet&lt;/a&gt;, 
&lt;a href=&quot;https://orc.apache.org/&quot;&gt;Apache ORC&lt;/a&gt;, &lt;a 
href=&quot;https://avro.apache.org/&quot;&gt;Apache Avro&lt;/a&gt;, &lt;a 
href=&quot;https://en.wikipedia.org/wiki/JSON&quot;&gt;JSON&lt;/a&gt;, etc. 
More recently, &lt;a href=&quot;https://arrow.apache.org/&quot;&gt;Apache 
Arrow&lt;/a&gt; has been introduced to standardize the in-memory columnar data 
representation between multiple frameworks. To be precise, Arrow is not a 
storage format but it defines an &lt;a 
href=&quot;https://github.com/apache/arrow/blob/master/format/IPC.md&quot;&gt;interprocess
 communication (IPC) format&lt;/a&gt; that can be used to store data in a storage system (our 
binding for reading Arrow IPC messages from HDFS is available &lt;a 
href=&quot;https://github.com/zrlio/fileformat-benchmarks/blob/master/src/main/java/com/github/animeshtrivedi/FileBench/HdfsSeekableByteChannel.java&quot;&gt;here&lt;/a&gt;).
 There is no one-size-fits-all format, as each of these formats has its own strengths, 
weaknesses, and features. In this blog, we are specifically interested in the 
performance of these formats on modern high-performance networking and storage 
devices.&lt;/p&gt;
-
-&lt;figure&gt;&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/sql-p1/outline.svg&quot; 
width=&quot;550&quot; /&gt;&lt;figcaption&gt;Figure 1: The benchmarking setup 
with HDFS and file formats on a 100 Gbps network with NVMe flash devices. All 
formats contain routines for compression, encoding, and value materialization 
with associated I/O buffer management and data copy 
routines.&lt;p&gt;&lt;/p&gt;&lt;/figcaption&gt;&lt;/div&gt;&lt;/figure&gt;
-
-&lt;p&gt;To benchmark the performance of file formats, we wrote a set of 
micro-benchmarks which are available at &lt;a 
href=&quot;https://github.com/zrlio/fileformat-benchmarks&quot;&gt;https://github.com/zrlio/fileformat-benchmarks&lt;/a&gt;.
 We cannot use typical SQL micro-benchmarks because every SQL engine has its 
own favorite file format, on which it performs the best. Hence, in order to 
ensure parity, we decoupled the performance of reading the input file format 
from the SQL query processing by writing simple table reading micro-benchmarks. 
Our benchmark reads in the store_sales table from the TPC-DS dataset (scale 
factor 100), and calculates a sum of values present in the table. The table 
contains 23 columns of integers, doubles, and longs.&lt;/p&gt;
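
(Editorial aside, not part of the original post: the shape of such a
table-reading micro-benchmark can be sketched as below. FormatReader and its
methods are hypothetical stand-ins for the per-format readers in the linked
zrlio/fileformat-benchmarks repository.)

    // Hypothetical per-format reader interface; the real benchmarks
    // implement one such reader per file format (Parquet, ORC, Arrow, ...).
    interface FormatReader extends AutoCloseable {
        boolean nextRow();                 // advance to the next row, false at EOF
        double columnAsDouble(int index);  // materialize one column of the current row
    }

    public class TableScanBenchmark {
        // Sum every value of the 23-column store_sales table: this isolates
        // input-format reading performance from SQL query processing.
        static double scan(FormatReader reader, int numColumns) throws Exception {
            double sum = 0.0;
            while (reader.nextRow()) {
                for (int col = 0; col < numColumns; col++) {
                    sum += reader.columnAsDouble(col);
                }
            }
            return sum;
        }
    }
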
-
-&lt;figure&gt;&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/sql-p1/performance-all.svg&quot;
 width=&quot;550&quot; /&gt;&lt;figcaption&gt;Figure 2: Performance of JSON, 
Avro, Parquet, ORC, and Arrow on NVMe devices over a 100 Gbps 
network.&lt;p&gt;&lt;/p&gt;&lt;/figcaption&gt;&lt;/div&gt;&lt;/figure&gt;
-
-&lt;p&gt;We evaluate the performance of the benchmark on a 3-node HDFS cluster 
connected using 100 Gbps RoCE. One datanode in HDFS contains 4 NVMe devices 
with an aggregate bandwidth of 12.5 GB/sec (equal to 100 Gbps; hence, 
we have balanced network and storage performance). Figure 2 shows our 
results where none of the file formats is able to deliver the full hardware 
performance for reading input files. One third of the performance is already 
lost in HDFS (maximum throughput 74.9 Gbps out of possible 100 Gbps). The rest 
of the performance is lost inside the file format implementation, which needs 
to deal with encoding, buffer and I/O management, compression, etc. The best 
performer is Apache Arrow, which is designed for in-memory columnar datasets. 
The performance of these file formats is bounded by the performance of the 
CPU, which is 100% loaded during the experiment. For a detailed analysis of the 
file formats, please refer to our paper - &lt;a 
href=&quot;https://www.usenix.org/conference/atc18/presentation/trivedi&quot;&gt;Albis: 
High-Performance File Format for Big Data Systems (USENIX ATC’18)&lt;/a&gt;. 
As a side note on the Arrow performance - we have evaluated the performance of 
the &lt;em&gt;implementation of Arrow’s Java library&lt;/em&gt;. As this library 
has been focused on interactions with off-heap memory, there is headroom for 
optimizing the HDFS/on-heap reading path of Arrow’s Java library.&lt;/p&gt;
-
-&lt;h3 
id=&quot;albis-high-performance-file-format-for-big-data-systems&quot;&gt;Albis:
 High-Performance File Format for Big Data Systems&lt;/h3&gt;
-
-&lt;p&gt;Based on these findings, we have developed a new file format called 
Albis. Albis is built on similar design choices as Crail. The top-level idea is 
to leverage the performance of modern networking and storage devices without 
being bottleneck by the CPU. While designing Albis we revisited many outdated 
assumptions about the nature of I/O in a distributed setting, and came up with 
the following ideas:&lt;/p&gt;
-
-&lt;ul&gt;
-  &lt;li&gt;No compression or encoding: Modern network and storage devices are 
fast. Hence, there is no need to trade CPU cycles for performance. A 4 byte 
integer should be stored as a 4 byte value.&lt;/li&gt;
-  &lt;li&gt;Keep the data/metadata management simple: Albis splits a table 
into row and column groups, which are stored in hierarchical files and 
directories on the underlying file system (e.g., HDFS or Crail).&lt;/li&gt;
-  &lt;li&gt;Careful object materialization using a binary API: To optimize the 
runtime representation in managed runtimes like the JVM, only objects which are 
necessary for SQL processing are materialized. Otherwise, a 4 byte integer can 
be passed around as a byte array (using the binary API of Albis).&lt;/li&gt;
-&lt;/ul&gt;
-
-&lt;figure&gt;&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/sql-p1/core-scalability.svg&quot;
 width=&quot;550&quot; /&gt;&lt;figcaption&gt;Figure 3: Core scalability of 
JSON, Avro, Parquet, ORC, Arrow, and Albis on 
HDFS/NVMe.&lt;p&gt;&lt;/p&gt;&lt;/figcaption&gt;&lt;/div&gt;&lt;/figure&gt;
-
-&lt;p&gt;Using the Albis format, we revise our previous experiment where we 
read the input store_sales table from HDFS. In the figure above, we show the 
performance of Albis and other file formats with the number of CPU cores involved. 
At the right end of the x-axis, we have the performance with all 16 cores engaged, 
hence representing the peak possible performance. As evident, Albis delivers 
59.9 Gbps out of the 74.9 Gbps possible bandwidth with HDFS over NVMe. Albis 
performance is 1.9 - 21.4x better than the other file formats. To give an 
impression of where the performance comes from, in the table below we show 
some micro-architectural features for Parquet, ORC, Arrow, and Albis. Our 
previously discussed design ideas in Albis result in a shorter code path (shown 
as fewer instructions required per row), better cache performance (shown as 
fewer cache misses per row), and clearly better performance (shown as 
fewer nanoseconds required per row for processing). For a detailed evaluation of 
Albis please refer to our paper.&lt;/p&gt;
-
-&lt;table style=&quot;width:100%&quot;&gt;
-  &lt;caption&gt; Table 1: Micro-architectural analysis for Parquet, ORC, 
Arrow, and Albis on a 16-core Xeon machine.&lt;p&gt;&lt;/p&gt;&lt;/caption&gt;
-  &lt;tr&gt;
-    &lt;th&gt;&lt;/th&gt;
-    &lt;th&gt;Parquet&lt;/th&gt;
-    &lt;th&gt;ORC&lt;/th&gt; 
-    &lt;th&gt;Arrow&lt;/th&gt;
-    &lt;th&gt;Albis&lt;/th&gt;
-  &lt;/tr&gt;
-  &lt;tr&gt;
-    &lt;th&gt;Instructions/row&lt;/th&gt;
-    &lt;td&gt;6.6K&lt;/td&gt; 
-    &lt;td&gt;4.9K&lt;/td&gt; 
-    &lt;td&gt;1.9K&lt;/td&gt; 
-    &lt;td&gt;1.6K&lt;/td&gt; 
-  &lt;/tr&gt;
-  &lt;tr&gt;
-    &lt;th&gt;Cache misses/row&lt;/th&gt;
-    &lt;td&gt;9.2&lt;/td&gt; 
-    &lt;td&gt;4.6&lt;/td&gt; 
-    &lt;td&gt;5.1&lt;/td&gt; 
-    &lt;td&gt;3.0&lt;/td&gt; 
-  &lt;/tr&gt;
-  &lt;tr&gt;
-    &lt;th&gt;Nanoseconds/row&lt;/th&gt;
-    &lt;td&gt;105.3&lt;/td&gt; 
-    &lt;td&gt;63.0&lt;/td&gt; 
-    &lt;td&gt;31.2&lt;/td&gt; 
-    &lt;td&gt;20.8&lt;/td&gt; 
-  &lt;/tr&gt;
-&lt;/table&gt;
-&lt;p&gt;&lt;/p&gt;
-
-&lt;h3 id=&quot;apache-crail-incubating-with-albis&quot;&gt;Apache Crail 
(Incubating) with Albis&lt;/h3&gt;
-
-&lt;p&gt;For our final experiment, we try to answer the question of what it would 
take to deliver the full 100 Gbps bandwidth for Albis. Certainly, the first 
step is to improve the base storage layer performance. Here we use Apache 
Crail (Incubating) with its &lt;a 
href=&quot;https://en.wikipedia.org/wiki/NVM_Express#NVMeOF&quot;&gt;NVMeF&lt;/a&gt;
 storage tier. This tier uses the &lt;a 
href=&quot;https://github.com/zrlio/jNVMf&quot;&gt;jNVMf library&lt;/a&gt; to 
implement the NVMeF stack in Java. We have shown in a previous blog &lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/08/crail-nvme-fabrics-v1.html&quot;&gt;post&lt;/a&gt;
 that Crail’s NVMeF tier can deliver performance (97.8 Gbps) very close to 
the hardware limits. Hence, Albis with Crail is a perfect setup to evaluate on 
high-performance NVMe and RDMA devices. Before we get there, let’s get some 
calculations right. The store_sales table in the TPC-DS dataset has a data 
density of 93.9% (out of 100 bytes, only 93.9 bytes are data; the others are 
null values). As we measure the goodput, the expected performance of Albis on 
Crail is 93.9% of 97.8 Gbps, which works out to 91.8 Gbps. In our experiments, 
Albis on Crail delivers 85.5 Gbps. Figure 4 shows more detailed results.&lt;/p&gt;
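
(Editorial aside: written out, the goodput arithmetic in the preceding
paragraph is

    expected goodput  = 0.939 * 97.8 Gbps = 91.8 Gbps
    measured/expected = 85.5 / 91.8       = 93.1%

so Albis on Crail reaches about 93% of the expected peak.)
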
-
-&lt;figure&gt;&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/sql-p1/albis-crail.svg&quot; 
width=&quot;550&quot; /&gt;&lt;figcaption&gt;Figure 4: Performance of Albis on 
Crail.&lt;p&gt;&lt;/p&gt;&lt;/figcaption&gt;&lt;/div&gt;&lt;/figure&gt;
-
-&lt;p&gt;The left half of the figure shows the performance scalability of 
Albis on Crail in a setup with 1 core (8.9 Gbps) to 16 cores (85.5 Gbps). In 
comparison, the right half of the figure shows the performance of Albis on 
HDFS/NVMe at 59.9 Gbps, and on Crail/NVMe at 85.5 Gbps. The last bar shows the 
performance of Albis if the benchmark does not materialize Java object values. 
In this configuration, Albis on Crail delivers 91.3 Gbps, which is very close 
to the expected peak of 91.8 Gbps.&lt;/p&gt;
-
-&lt;h3 id=&quot;summary&quot;&gt;Summary&lt;/h3&gt;
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-In this first blog of a multipart series, we have looked at the data ingestion 
performance of file formats on high-performance networking and storage devices. 
We found that popular file formats are in need of a performance revision. 
Based on our analysis, we designed and implemented Albis - a new file format 
for storing relational data. Albis and Crail share many design choices. Their 
combined performance of 85+ Gbps on a 100 Gbps network gives us confidence in 
our approach and underlying software philosophy for both Crail and Albis. 
-&lt;/p&gt;
-
-&lt;p&gt;
-Stay tuned for the next part where we look at workload-level performance in 
Spark/SQL on modern high-performance networking and storage devices. Meanwhile, 
let us know if you have any feedback or comments. 
-&lt;/p&gt;
-&lt;/div&gt;</content><author><name>Animesh Trivedi</name></author><category 
term="blog" /><summary type="html">This is the first user blog post in a 
multi-part series where we will focus on relational data processing performance 
(e.g., SQL) in the presence of high-performance network and storage devices - the 
kind of devices that Crail targets. Relational data processing is one of the 
most popular and versatile workloads people run in the cloud. The general idea 
is that data is stored in tables with a schema, and is processed using a 
domain-specific language like SQL. Examples of some popular systems that support such 
relational data analytics in the cloud are Apache Spark/SQL, Apache Hive, 
Apache Impala, etc. In this post, we discuss the important first step in 
relational data processing, which is the reading of input data 
tables.</summary></entry><entry><title type="html">Sparksummit</title><link 
href="http://crail.incubator.apache.org//blog/2018/06/sparksummit.html"; 
rel="alternate" 
 type="text/html" title="Sparksummit" 
/><published>2018-06-05T00:00:00+02:00</published><updated>2018-06-05T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2018/06/sparksummit</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2018/06/sparksummit.html";>&lt;p&gt;A
 Spark serverless architecture powered by Crail will be presented today at the 
&lt;a 
href=&quot;https://databricks.com/session/serverless-machine-learning-on-modern-hardware-using-apache-spark&quot;&gt;Spark
 Summit&lt;/a&gt;&lt;/p&gt;</content><author><name></name></author><category 
term="news" /><summary type="html">A Spark serverless architecture powered by 
Crail will be presented today at the Spark 
Summit</summary></entry><entry><title type="html">Dataworks</title><link 
href="http://crail.incubator.apache.org//blog/2018/06/dataworks.html"; 
rel="alternate" type="text/html" title="Dataworks" 
/><published>2018-06-05T00:00:00+02:00</published><updated>2018-06-05T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2018/06/dataworks</id><content 
type="html" 
xml:base="http://crail.incubator.apache.org//blog/2018/06/dataworks.html";>&lt;p&gt;Apache
 Crail (incubating) to feature in the &lt;a 
href=&quot;https://dataworkssummit.com/san-jose-2018/session/data-processing-at-the-speed-of-100-gbpsapache-crail-incubating/&quot;&gt;DataWorks
 Summit&lt;/a&gt; on June 
21st&lt;/p&gt;</content><author><name></name></author><category term="news" 
/><summary type="html">Apache Crail (incubating) to feature in the DataWorks 
Summit on June 21st</summary></entry><entry><title type="html">Apache 
Release</title><link 
href="http://crail.incubator.apache.org//blog/2018/06/apache-release.html"; 
rel="alternate" type="text/html" title="Apache Release" 
/><published>2018-06-04T00:00:00+02:00</published><updated>2018-06-04T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2018/06/apache-release</id><content
 type="html" xml:base="http://crail.incubator.apache.org/
 /blog/2018/06/apache-release.html">&lt;p&gt;Apache Crail 1.0 incubator &lt;a 
href=&quot;//crail.incubator.apache.org/download&quot;&gt;release&lt;/a&gt;&lt;/p&gt;</content><author><name></name></author><category
 term="news" /><summary type="html">Apache Crail 1.0 incubator 
release</summary></entry><entry><title type="html">Apache</title><link 
href="http://crail.incubator.apache.org//blog/2018/01/apache.html"; 
rel="alternate" type="text/html" title="Apache" 
/><published>2018-01-22T00:00:00+01:00</published><updated>2018-01-22T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2018/01/apache</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2018/01/apache.html";>&lt;p&gt;Crail
 is now an Apache Incubator 
project!&lt;/p&gt;</content><author><name></name></author><category term="news" 
/><summary type="html">Crail is now an Apache Incubator 
project!</summary></entry><entry><title type="html">Iops</title><link 
href="http://crail.incubator.apache.org/
 /blog/2017/11/iops.html" rel="alternate" type="text/html" title="Iops" 
/><published>2017-11-23T00:00:00+01:00</published><updated>2017-11-23T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/iops</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2017/11/iops.html";>&lt;p&gt;New
 blog &lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/11/crail-metadata.html&quot;&gt;post&lt;/a&gt;
 about Crail’s metadata performance and 
scalability&lt;/p&gt;</content><author><name></name></author><category 
term="news" /><summary type="html">New blog post about Crail’s metadata 
performance and scalability</summary></entry><entry><title type="html">Crail 
Storage Performance – Part III: Metadata</title><link 
href="http://crail.incubator.apache.org//blog/2017/11/crail-metadata.html"; 
rel="alternate" type="text/html" title="Crail Storage Performance -- Part III: 
Metadata" 
/><published>2017-11-21T00:00:00+01:00</published><updated>2017-11-21T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/crail-metadata</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2017/11/crail-metadata.html";>&lt;div
 style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-This is part III of our series of posts discussing Crail's raw storage 
performance. This part is about Crail's metadata performance and scalability.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;h3 id=&quot;hardware-configuration&quot;&gt;Hardware 
Configuration&lt;/h3&gt;
-
-&lt;p&gt;The specific cluster configuration used for the experiments in this 
blog:&lt;/p&gt;
-
-&lt;ul&gt;
-  &lt;li&gt;Cluster
-    &lt;ul&gt;
-      &lt;li&gt;8 node x86_64 cluster&lt;/li&gt;
-    &lt;/ul&gt;
-  &lt;/li&gt;
-  &lt;li&gt;Node configuration
-    &lt;ul&gt;
-      &lt;li&gt;CPU: 2 x Intel(R) Xeon(R) CPU E5-2690 0 @ 2.90GHz&lt;/li&gt;
-      &lt;li&gt;DRAM: 96GB DDR3&lt;/li&gt;
-      &lt;li&gt;Network: 1x100Gbit/s Mellanox ConnectX-5&lt;/li&gt;
-    &lt;/ul&gt;
-  &lt;/li&gt;
-  &lt;li&gt;Software
-    &lt;ul&gt;
-      &lt;li&gt;Ubuntu 16.04.3 LTS (Xenial Xerus) with Linux kernel version 
4.10.0-33-generic&lt;/li&gt;
-      &lt;li&gt;Crail 1.0, internal version 2993&lt;/li&gt;
-    &lt;/ul&gt;
-  &lt;/li&gt;
-&lt;/ul&gt;
-
-&lt;h3 id=&quot;crail-metadata-operation-overview&quot;&gt;Crail Metadata 
Operation Overview&lt;/h3&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-As described in &lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/08/crail-memory.html&quot;&gt;part
 I&lt;/a&gt;, Crail data operations are composed of actual data transfers and 
metadata operations. Examples of metadata operations are operations for 
creating or modifying the state of a file, or operations to lookup the storage 
server that stores a particular range (block) of a file. In Crail, all the 
metadata is managed by the namenode(s) (as opposed to the data which is managed 
by the storage nodes). Clients interact with Crail namenodes via Remote 
Procedure Calls (RPCs). Crail supports multiple RPC protocols for different 
types of networks and also offers a pluggable RPC interface so that new RPC 
bindings can be implemented easily. On RDMA networks, the default DaRPC (&lt;a 
href=&quot;https://dl.acm.org/citation.cfm?id=2670994&quot;&gt;DaRPC 
paper&lt;/a&gt;, &lt;a href=&quot;http://github.com/zrlio/darpc&quot;&gt;DaRPC 
GitHub&lt;/a&gt;) based RPC binding provides the best performance. The figure below gives an overview of the Crail metadata processing 
in a DaRPC configuration. 
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/crail-metadata/rpc.png&quot; 
width=&quot;480&quot; /&gt;&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-Crail supports partitioning of metadata across several namenodes. Thereby, 
metadata operations issued by clients are hashed to a particular namenode 
depending on the name of the object the operation attempts to create or retrieve. 
With the DaRPC binding, RPC messages are exchanged using RDMA send/recv 
operations. At the server, RPC processing is parallelized across different 
cores. To minimize locking and cache contention, each core handles a disjoint 
set of client connections. Connections assigned to the same core share the same 
RDMA completion queue which is processed exclusively by that given core. All 
the network queues, including send-, recv- and completion queues are mapped 
into user-space and accessed directly from within the JVM process. Since Crail 
offers a hierarchical storage namespace, metadata operations to create, delete 
or rename new storage resources effectively result in modifications to a 
tree-like data structure at the namenode. These structural operations require 
somewhat more expensive locking than the more lightweight operations used to 
look up the file status or to extend a file with a new storage block. 
Consequently, Crail namenodes use two separate data structures to manage 
metadata: (a) a basic tree data structure that requires directory-based 
locking, and (b) a fast lock-free map for lookups of storage resources that are 
currently being read or written.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;h3 id=&quot;experimental-setup&quot;&gt;Experimental Setup&lt;/h3&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-In two of the previous blogs (&lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt;,
 &lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/08/crail-nvme-fabrics-v1.html&quot;&gt;NVMf&lt;/a&gt;)
 we have already shown that Crail metadata operations are very low latency. 
Essentially a single metadata operation issued by a remote client takes 5-6 
microseconds, which is only slightly more than the raw network latency of the 
RDMA network fabric. In this blog, we want to explore the scalability of 
Crail's metadata management, that is, the number of clients Crail can support, 
or how Crail scales as the cluster size increases. The level of scalability of 
Crail is mainly determined by the number of metadata operations Crail can 
process concurrently, a metric that is often referred to as IOPS. The higher 
the number of IOPS the system can handle, the more clients can concurrently use 
Crail without performance loss. 
-&lt;/p&gt;
-&lt;p&gt;
-An important metadata operation is ''getFile()'', which is used by clients to 
look up the status of a file (whether the file exists, what size it has, etc.). 
The ''getFile()'' operation is served by Crail's fast lock-free map and in 
spirit is very similar to the ''getBlock()'' metadata operation (used by 
clients to query which storage node holds a particular block). In a typical 
Crail use case, ''getFile()'' and ''getBlock()'' are responsible for the peak 
metadata load at a namenode. In this experiment, we measure the achievable IOPS 
on the server side in an artificial configuration with many clients distributed 
across the cluster issuing ''getFile()'' in a tight loop. Note that the client 
side RPC interface in Crail is asynchronous; thus, clients can issue multiple 
metadata operations without blocking while asynchronously waiting for the 
result. In the experiments below, each client may have a maximum of 128 
''getFile()'' operations outstanding at any point in time. In a practical 
 scenario, Crail clients may also have multiple metadata operations in flight 
either because clients are shared by different cores, or because Crail 
interleaves metadata and data operations (see &lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt;).
 What makes the benchmark artificial is that clients exclusively focus on 
generating load for the namenode and thereby are neither performing data 
operations nor are they doing any compute. The basic command of the benchmark, 
as executed by each of the individual clients, is the following:
-&lt;/p&gt;
-&lt;/div&gt;
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;./bin/crail iobench -t 
getMultiFileAsync -f / -k 10000000 -b 128
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-Where ''-t'' specifies the benchmark to run, ''-f'' specifies the path on the
-Crail file system to be used for the benchmark, ''-k'' specifies the number of
-iterations to be performed by the benchmark
-(how many times the benchmark will execute ''getFile()'') and
-''-b'' specifies the maximum number of requests in flight.
-&lt;/p&gt;
-&lt;/div&gt;
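
(Editorial aside, not part of the original post: the windowed asynchronous
pattern described above - at most 128 ''getFile()'' RPCs in flight per
client - can be sketched in Java as follows. AsyncMetadataClient and
getFileAsync are hypothetical stand-ins, not Crail's actual RPC interface.)

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Semaphore;

    public class AsyncGetFileLoop {
        // Hypothetical asynchronous metadata client.
        interface AsyncMetadataClient {
            CompletableFuture<Void> getFileAsync(String path);
        }

        // Issue 'iterations' getFile() RPCs, keeping at most 'batch' in
        // flight, as iobench does with "-k 10000000 -b 128".
        static void run(AsyncMetadataClient client, String path,
                        long iterations, int batch) throws InterruptedException {
            Semaphore window = new Semaphore(batch);  // bounds outstanding RPCs
            for (long i = 0; i < iterations; i++) {
                window.acquire();                     // wait if the window is full
                client.getFileAsync(path)
                      .whenComplete((v, err) -> window.release());
            }
            window.acquire(batch);                    // drain: wait for all completions
        }
    }
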
-
-&lt;h3 id=&quot;single-namenode-scalability&quot;&gt;Single Namenode 
Scalability&lt;/h3&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-In the first experiment, we measure the aggregated number of metadata 
operations a single Crail namenode can handle per second. The namenode runs on 
8 physical cores with hyper-threading disabled. The result is shown in the 
first graph below, labeled ''Namenode IOPS''. The namenode only gets saturated 
with more than 16 clients. The graph shows that the namenode can handle close 
to 10 million ''getFile()'' operations per second. With significantly more 
clients, the overall number of IOPS drops slightly, as more resources are being 
allocated on the single RDMA card, which creates contention on 
hardware resources.
-&lt;/p&gt;
-&lt;p&gt; 
-As a comparison, we measure the raw number of IOPS that can be executed on the 
RDMA network. We measure the raw number using ib_send_bw. We configured 
ib_send_bw with the same parameters in terms of RDMA configuration as the 
namenode. This means we instructed ib_send_bw not to do CQ moderation, and to 
use a receive queue and a send queue of length 32, which equals the length of 
the namenode queues. Note that the default configuration of ib_send_bw uses CQ 
moderation and does preposting of send operations, which can only be done if 
the operation is known in advance. This is not the case in a real system like 
Crail's namenode. The basic ib_send_bw command is given below:
-&lt;/p&gt;
-&lt;/div&gt;
-&lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div 
class=&quot;highlight&quot;&gt;&lt;pre 
class=&quot;highlight&quot;&gt;&lt;code&gt;ib_send_bw -s 1 -Q 1 -r 32 -t 32 -n 
10000000
-&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-Where ''-s 1'' specifies to send packets with a payload of 1 (we don't want to
-measure the transmission time of data, just the number of I/O operations),
-''-Q 1'' specifies not to do CQ moderation, ''-r 32'' specifies the receive
-queue length to be 32, ''-t 32'' specifies the send queue length to be 32
-and ''-n'' specifies the number of
-iterations to be performed by ib_send_bw.
-&lt;/p&gt;
-&lt;/div&gt;
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-The raw number of IOPS, labeled ''ib send'', is shown as a line in the same 
graph. With this measurement we show that Crail's namenode IOPS are similar to 
the raw ib_send_bw IOPS with the same configuration.
-&lt;/p&gt;
-&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/crail-metadata/namenode_ibsend_iops64.svg&quot;
 width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-If one starts ib_send_bw without specifying the queue sizes or whether or not 
to use CQ moderation, the raw number of IOPS might be higher. This is due to 
the fact that the default values of ib_send_bw use a receive queue of 512, a 
send queue of 128 and CQ moderation of 100, meaning that a new completion is 
generated only after 100 sends. As a comparison, we did this
-measurement too and show the result, labeled 'ib_send CQ mod', in the same 
graph. Fine-tuning of receive and send queue sizes, CQ moderation size, 
postlists, etc. might lead to a higher number of IOPS. 
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;h3 id=&quot;multiple-namenode-scalability&quot;&gt;Multiple Namenode 
Scalability&lt;/h3&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-To increase the number of IOPS the overall system can handle, we allow 
starting multiple namenode instances. Hot metadata operations, such as 
''getFile()'', are distributed over all running instances of the namenode. 
''getFile()'' is implemented such that no synchronization among the namenodes 
is required. As such, we expect good scalability. The graph below compares the 
overall IOPS of a system with one namenode to a system with two namenodes and 
four namenodes.
-&lt;/p&gt;
-&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/crail-metadata/namenode_multi64.svg&quot;
 width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-This graph shows that the system can handle around 17 million IOPS with two 
namenodes and 28 million IOPS with four namenodes (with more than 64 clients we 
measured the number of IOPS to be slightly higher than 30 million). Having 
multiple namenode instances matters especially with a higher number of clients. 
In the graph we see that the more clients we have, the more we benefit from 
a second namenode instance, or even more instances.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-We only have 7 physical nodes available to run the client processes. This
-means that beyond 7 client processes, processes start sharing a physical machine.
-With 64 client processes, each machine runs 9 (10 in one case) client
-instances, which share the cores and the resources of the RDMA hardware.
-We believe this is the reason why the graphs appear not to scale linearly.
-The total number of IOPS is client-bound, not namenode-bound.
-With more physical machines, we believe that scalability could be shown
-much better. Again, there is absolutely no communication happening among the
-namenodes, which should lead to linear scalability.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;h3 id=&quot;cluster-sizes&quot;&gt;Cluster sizes&lt;/h3&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-Let us look at a concrete application, which ideally runs on a large cluster:
-TeraSort. In a previous blog, &lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/01/sorting.html&quot;&gt;sorting&lt;/a&gt;,
-we analyze performance characteristics of TeraSort on Crail on a big cluster
-of 128 nodes, where we run 384 executors in total. This already proves that
-Crail can at least handle 384 clients. Now we analyze the theoretical number
-of clients without performance loss at the namenode. Still, this theoretical
-number is not a hard limit on the number of clients. Adding more
-clients would simply start dropping the number of IOPS per client (not at the
-namenode).
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-In contrast to the benchmarks above, a real-world application, like TeraSort,
-does not issue RPC requests in a tight loop. It rather does sorting
-(computation), file reading and writing, and of course a certain amount of
-RPCs to manage the files.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-We would like to know how many RPCs a run of TeraSort generates and therefore
-how big the load at the namenode is, in terms of IOPS, for a
-real-world application.
-We ran TeraSort on a data set of 200GB and measured the
-number of IOPS at the namenode with 4 executors, 8 executors and 12 executors.
-Every executor runs 12 cores. For this experiment, we use a single namenode
-instance. We plot the distribution of the number of IOPS measured at the
-namenode over the elapsed runtime of the TeraSort application.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/crail-metadata/terasort_iops.svg&quot;
 width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-From the graph we pick the peak number of IOPS measured
-throughout the execution time for all three cases. The following table
-shows the three peak IOPS numbers:
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;center&gt;
-&lt;table&gt;
-  &lt;thead&gt;
-    &lt;tr&gt;
-      &lt;th&gt;#Executor nodes&lt;/th&gt;
-      &lt;th&gt;Measured IOPS&lt;/th&gt;
-      &lt;th&gt;% of single namenode&lt;/th&gt;
-    &lt;/tr&gt;
-  &lt;/thead&gt;
-  &lt;tbody&gt;
-    &lt;tr&gt;
-      &lt;td align=&quot;right&quot;&gt;4&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;32k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;0.32%&lt;/td&gt;
-    &lt;/tr&gt;
-    &lt;tr&gt;
-      &lt;td align=&quot;right&quot;&gt;8&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;67k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;0.67%&lt;/td&gt;
-    &lt;/tr&gt;
-    &lt;tr&gt;
-      &lt;td align=&quot;right&quot;&gt;12&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;107k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;1.07%&lt;/td&gt;
-    &lt;/tr&gt;
-  &lt;/tbody&gt;
-&lt;/table&gt;
-&lt;/center&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-From this table we see that the load scales linearly. Even more importantly,
-we notice that with 12 nodes we still use only around 1% of the
-number of IOPS a single namenode can handle.
-If we extrapolate this to
-100%, we can handle a cluster size of almost 1200 nodes (1121 clients being just
-below 10 million IOPS at the namenode). The
-extrapolated numbers would look like this:
-&lt;/p&gt;
-&lt;/div&gt;
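
(Editorial aside: the extrapolation is simple proportionality. Twelve executor
nodes generate 107k IOPS, i.e. roughly 8.9k IOPS per node, so a single
namenode's budget of about 10 million IOPS supports

    10000k / (107k / 12) = ~1121 nodes

before the namenode saturates, matching the first row of the table below.)
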
-
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;center&gt;
-&lt;table&gt;
-  &lt;thead&gt;
-    &lt;tr&gt;
-      &lt;th&gt;#Namenodes&lt;/th&gt;
-      &lt;th&gt;Max IOPS by namenodes&lt;/th&gt;
-      &lt;th&gt;#Executor nodes&lt;/th&gt;
-      &lt;th&gt;Extrapolated IOPS&lt;/th&gt;
-      &lt;th&gt;% of all namenodes&lt;/th&gt;
-    &lt;/tr&gt;
-  &lt;/thead&gt;
-  &lt;tbody&gt;
-    &lt;tr&gt;
-      &lt;td align=&quot;right&quot;&gt;1&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;10000k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;1121&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;9996k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;99.96%&lt;/td&gt;
-    &lt;/tr&gt;
-    &lt;tr&gt;
-      &lt;td align=&quot;right&quot;&gt;1&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;10000k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;1200&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;10730k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;107.3%&lt;/td&gt;
-    &lt;/tr&gt;
-    &lt;tr&gt;
-      &lt;td align=&quot;right&quot;&gt;2&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;17000k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;1906&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;16995k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;99.97%&lt;/td&gt;
-    &lt;/tr&gt;
-    &lt;tr&gt;
-      &lt;td align=&quot;right&quot;&gt;4&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;30000k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;3364&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;29995k&lt;/td&gt;
-      &lt;td align=&quot;right&quot;&gt;99.98%&lt;/td&gt;
-    &lt;/tr&gt;
-&lt;/tbody&gt;
-&lt;/table&gt;
-&lt;/center&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-Of course we know that there is no system with perfect linear scalability.
-But even if we lost 50% of the number of IOPS (compared to the
-theoretical maximum) on a big cluster, Crail could still handle a cluster size
-of 600 nodes with a single namenode without any performance loss at the
-namenode.
-Should we still want to run an application like TeraSort on a bigger cluster,
-we can add a second namenode, or even more instances,
-to ensure that clients do not suffer from contention in terms of IOPS at
-the namenode.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-We believe that the combination of benchmarks above, the scalability
-experiments and the real-world
-application of TeraSort shows clearly that Crail and Crail's namenode can 
handle
-a big cluster of at least several hundreds of nodes, theoretically up to
-1200 nodes with a single namenode and even more with multiple namenodes.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;h3 id=&quot;system-comparison&quot;&gt;System comparison&lt;/h3&gt;
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-In this section we compare the number of IOPS Crail can handle with that
-of two other systems:
-&lt;a href=&quot;http://hadoop.apache.org/&quot;&gt;Hadoop's HDFS namenode&lt;/a&gt; and
-&lt;a href=&quot;https://ramcloud.atlassian.net/wiki/spaces/RAM/overview&quot;&gt;RAMCloud&lt;/a&gt;.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-HDFS is a well-known distributed file system. Like Crail, HDFS runs a
-namenode and several datanodes. The namenode implements functionality
-similar to Crail's namenode, while HDFS's datanodes provide additional
-functionality, such as replication. We are interested in the number of
-IOPS the namenode can handle, so the datanodes' functionality is not
-relevant for this experiment. Like Crail, HDFS is implemented in Java.
-Given this strong similarity in both functionality and implementation
-language, HDFS is a good candidate to compare Crail against.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-HDFS does not use RDMA to send RPCs. Instead, RPCs are sent over a regular
-IP network; in our case, the same 100Gbit/s Ethernet-based RoCE network.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-To measure the number of IOPS HDFS's namenode can handle, we run the same
-experiment as for Crail: the clients issue a getFile() RPC to the
-namenode, and we vary the number of clients from 1 to 64. The following
-plot shows the number of IOPS relative to the number of clients.
-&lt;/p&gt;
-&lt;/div&gt;
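As a rough illustration of one such HDFS client, here is a minimal sketch
using the standard Hadoop FileSystem API. Each getFileStatus() call turns
into one getFileInfo RPC to the namenode, which is the closest analogue to
Crail's getFile(); the path, loop count, and class name are our own
placeholders, and the real benchmark harness is more elaborate:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class NamenodeIopsClient {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            Path file = new Path("/bench/file-0");  // hypothetical test file
            int ops = 1_000_000;
            long start = System.nanoTime();
            for (int i = 0; i < ops; i++) {
                fs.getFileStatus(file);             // one metadata RPC per call
            }
            double seconds = (System.nanoTime() - start) / 1e9;
            System.out.printf("%.0f metadata IOPS%n", ops / seconds);
        }
    }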
-
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/crail-metadata/namenode_hdfs_iops.svg&quot;
 width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-The graph shows that the namenode can handle around 200000 IOPS. One
-reason for the gap to Crail is certainly that HDFS does not exploit the
-capabilities of the RDMA network, while Crail does. However, this alone
-cannot explain why the namenode tops out at 200000 IOPS; a deeper
-analysis would be needed to locate the bottleneck. We believe that the
-sheer amount of code executed across the various layers of the software
-stack is too large to achieve high performance in terms of IOPS.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-RAMCloud is a fast key-value store that uses the RDMA network to reach
-low latency and high throughput. It runs one master coordinator and,
-optionally, several slave coordinators that can take over if the master
-coordinator fails. Coordinator persistence can be provided by external
-persistent storage such as Zookeeper or LogCabin. RAMCloud runs several
-storage servers, which keep key-value pairs in RAM; optionally, replicas
-can be stored on secondary storage, which provides persistence. RAMCloud
-is implemented in C++ and therefore runs as natively compiled code.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-We are interested in the number of IOPS RAMCloud can handle. We decided
-to run the readThroughput benchmark of RAMCloud's ClusterPerf program,
-which measures the number of object reads per second. This is probably
-the closest match to the RPC benchmark we ran for Crail and HDFS.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-For a fair comparison, we run RAMCloud without any persistence, i.e.
-without Zookeeper and without replicas on secondary storage. We run one
-coordinator and one storage server, which is roughly comparable to
-running one namenode in the Crail and HDFS cases. We also wanted to vary
-the number of clients from 1 to 64, but at the moment we can only get
-results for up to 16 clients. We asked the RAMCloud developers about
-possible reasons and learned that the cause is a starvation bug in the
-benchmark (not in the RAMCloud system itself). The RAMCloud developers
-are looking into the issue, and we will update the blog with the latest
-numbers as soon as the bug is fixed.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/crail-metadata/ramcloud_iops.svg&quot;
 width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-RAMCloud reaches a peak of 1.12Mio IOPS with 14 clients. As can be seen
-in the graph, the dispatcher thread is the bottleneck: its utilization
-already reaches 100% with 10 clients, so adding more clients does not
-push the number of IOPS beyond 1.12Mio. The developers also confirmed to
-us that more than 10 clients will not increase the number of IOPS. We
-therefore consider the comparison fair, even though we do not have
-results for more than 16 clients. Again, we will update the blog with a
-higher number of clients as soon as the bug is fixed.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-Let us now summarize the number of IOPS of all three systems in one plot
-below. For a fair comparison, Crail runs only one namenode in this
-experiment, and we compare the results to RAMCloud with one coordinator
-and one storage server (without replication, as described above) and to
-the single namenode instance of HDFS. We see that Crail's single namenode
-can handle far more RPCs than the other two systems (remember that Crail
-can run multiple namenodes; we measured 30Mio IOPS with 4 namenodes).
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img 
src=&quot;//crail.incubator.apache.org/img/blog/crail-metadata/max_iops_crail_hdfs_ramcloud.svg&quot;
 width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt;
-&lt;p&gt;
-HDFS is deployed on production clusters and handles real workloads at
-roughly 200000 IOPS. We believe that Crail, which can handle a much
-higher number of IOPS, is therefore able to serve real workloads on very
-large clusters. A common assumption is that Java-based implementations
-suffer a performance penalty. We show that a Java-based system can
-sustain a high rate of operations, even compared to a C++-based system
-like RAMCloud.
-&lt;/p&gt;
-&lt;/div&gt;
-
-&lt;h3 id=&quot;summary&quot;&gt;Summary&lt;/h3&gt;
-
-&lt;div style=&quot;text-align: justify&quot;&gt; 
-&lt;p&gt;
-In this blog we showed three key points about Crail: First, in terms of
-IOPS, Crail's namenode performs on par with ib_send_bw run with realistic
-parameters, which shows that the actual RPC processing is implemented
-efficiently. Second, with only one namenode, Crail performs 10x to 50x
-better than RAMCloud and HDFS, two popular systems, of which RAMCloud is
-RDMA-based and natively compiled. Third, Crail's metadata service can be
-scaled out to serve large numbers of clients: we have shown near-linear
-scaling with up to 4 namenodes, offering performance sufficient to serve
-several thousand clients.
-&lt;/p&gt;
-&lt;/div&gt;</content><author><name>Adrian Schuepbach and Patrick 
Stuedi</name></author><category term="blog" /><summary type="html">This is part 
III of our series of posts discussing Crail's raw storage performance. This 
part is about Crail's metadata performance and 
scalability.</summary></entry><entry><title type="html">Floss</title><link 
href="http://crail.incubator.apache.org//blog/2017/11/floss.html"; 
rel="alternate" type="text/html" title="Floss" 
/><published>2017-11-17T00:00:00+01:00</published><updated>2017-11-17T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/floss</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2017/11/floss.html";>&lt;p&gt;Crail
 features in the &lt;a 
href=&quot;https://twit.tv/shows/floss-weekly/episodes/458?autostart=false&quot;&gt;FLOSS
 weekly 
podcast&lt;/a&gt;&lt;/p&gt;</content><author><name></name></author><category 
term="news" /><summary type="html">Crail features in the FLOSS weekly 
podcast</summary></entry><entry><title type="html">Blog</title><link 
href="http://crail.incubator.apache.org//blog/2017/11/blog.html"; 
rel="alternate" type="text/html" title="Blog" 
/><published>2017-11-17T00:00:00+01:00</published><updated>2017-11-17T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/blog</id><content
 type="html" 
xml:base="http://crail.incubator.apache.org//blog/2017/11/blog.html";>&lt;p&gt;New
 blog &lt;a 
href=&quot;//crail.incubator.apache.org/blog/2017/11/rdmashuffle.html&quot;&gt;post&lt;/a&gt;
 about SparkRDMA and Crail shuffle 
plugins&lt;/p&gt;</content><author><name></name></author><category term="news" 
/><summary type="html">New blog post about SparkRDMA and Crail shuffle 
plugins</summary></entry></feed>
\ No newline at end of file
+<?xml version="1.0" encoding="utf-8"?><feed 
xmlns="http://www.w3.org/2005/Atom"; ><generator uri="https://jekyllrb.com/"; 
version="3.8.5">Jekyll</generator><link href="http://localhost:4000/feed.xml"; 
rel="self" type="application/atom+xml" /><link href="http://localhost:4000/"; 
rel="alternate" type="text/html" 
/><updated>2018-12-04T15:32:22+01:00</updated><id>http://localhost:4000/feed.xml</id><title
 type="html">The Apache Crail (Incubating) Project</title></feed>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/files/crail-project-web.pdf
----------------------------------------------------------------------
diff --git a/content/files/crail-project-web.pdf 
b/content/files/crail-project-web.pdf
deleted file mode 100644
index a3a53e5..0000000
Binary files a/content/files/crail-project-web.pdf and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/img/apache_incubator.png
----------------------------------------------------------------------
diff --git a/content/img/apache_incubator.png b/content/img/apache_incubator.png
deleted file mode 100644
index 987c79e..0000000
Binary files a/content/img/apache_incubator.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/004e8a6d/content/img/blog/crail-machine-learning/cocoa.svg
----------------------------------------------------------------------
diff --git a/content/img/blog/crail-machine-learning/cocoa.svg 
b/content/img/blog/crail-machine-learning/cocoa.svg
deleted file mode 100644
index 25f6565..0000000
--- a/content/img/blog/crail-machine-learning/cocoa.svg
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<svg xmlns="http://www.w3.org/2000/svg"; 
xmlns:xlink="http://www.w3.org/1999/xlink"; width="136pt" height="155pt" 
viewBox="0 0 136 155" version="1.2">
-<defs>
-<g>
-<symbol overflow="visible" id="glyph0-0">
-<path style="stroke:none;" d=""/>
-</symbol>
-<symbol overflow="visible" id="glyph0-1">
-<path style="stroke:none;" d="M 5.640625 -2.71875 C 5.640625 -4.28125 4.46875 
-5.53125 3.046875 -5.53125 L 0.78125 -5.53125 L 0.78125 0 L 3.046875 0 C 
4.484375 0 5.640625 -1.21875 5.640625 -2.71875 Z M 4.859375 -2.71875 C 4.859375 
-1.25 3.875 -0.46875 2.828125 -0.46875 L 1.5625 -0.46875 L 1.5625 -5.0625 L 
2.828125 -5.0625 C 3.921875 -5.0625 4.859375 -4.1875 4.859375 -2.71875 Z M 
4.859375 -2.71875 "/>
-</symbol>
-<symbol overflow="visible" id="glyph0-2">
-<path style="stroke:none;" d="M 7.859375 -5.53125 L 7.25 -5.53125 L 6.46875 
-3.015625 C 6.421875 -2.859375 5.859375 -1.0625 5.8125 -0.640625 L 5.796875 
-0.640625 C 5.75 -1.046875 5.171875 -2.921875 5.15625 -2.96875 L 4.359375 
-5.53125 L 3.703125 -5.53125 L 2.953125 -3.140625 C 2.875 -2.875 2.34375 
-1.15625 2.28125 -0.640625 C 2.234375 -1.125 1.640625 -3.03125 1.625 -3.125 L 
0.875 -5.53125 L 0.125 -5.53125 L 1.84375 0 L 2.53125 0 L 3.34375 -2.671875 L 
3.734375 -4 C 3.859375 -4.421875 3.921875 -4.671875 3.953125 -4.890625 L 
3.96875 -4.890625 C 4.015625 -4.515625 4.21875 -3.859375 4.359375 -3.390625 L 
5.015625 -1.21875 L 5.40625 0 L 6.140625 0 Z M 7.859375 -5.53125 "/>
-</symbol>
-<symbol overflow="visible" id="glyph1-0">
-<path style="stroke:none;" d=""/>
-</symbol>
-<symbol overflow="visible" id="glyph1-1">
-<path style="stroke:none;" d="M 2.921875 -1.9375 C 2.921875 -2.234375 2.921875 
-2.9375 2.640625 -3.421875 C 2.328125 -3.96875 1.875 -4.046875 1.578125 
-4.046875 C 1.3125 -4.046875 0.84375 -3.96875 0.546875 -3.453125 C 0.265625 
-2.96875 0.25 -2.328125 0.25 -1.9375 C 0.25 -1.5 0.28125 -0.953125 0.53125 -0.5 
C 0.796875 -0.015625 1.234375 0.125 1.578125 0.125 C 2.1875 0.125 2.515625 
-0.21875 2.6875 -0.59375 C 2.90625 -1.015625 2.921875 -1.578125 2.921875 
-1.9375 Z M 2.40625 -2.015625 C 2.40625 -1.625 2.40625 -1.171875 2.25 -0.796875 
C 2.09375 -0.359375 1.78125 -0.265625 1.578125 -0.265625 C 1.328125 -0.265625 
1.046875 -0.40625 0.890625 -0.84375 C 0.78125 -1.203125 0.765625 -1.578125 
0.765625 -2.015625 C 0.765625 -2.578125 0.765625 -3.65625 1.578125 -3.65625 C 
2.40625 -3.65625 2.40625 -2.578125 2.40625 -2.015625 Z M 2.40625 -2.015625 "/>
-</symbol>
-<symbol overflow="visible" id="glyph1-2">
-<path style="stroke:none;" d="M 2.71875 0 L 2.71875 -0.375 L 1.921875 -0.375 L 
1.921875 -4.046875 L 1.796875 -4.046875 C 1.40625 -3.6875 0.90625 -3.671875 
0.546875 -3.65625 L 0.546875 -3.28125 C 0.78125 -3.28125 1.078125 -3.296875 
1.375 -3.421875 L 1.375 -0.375 L 0.59375 -0.375 L 0.59375 0 Z M 2.71875 0 "/>
-</symbol>
-</g>
-</defs>
-<g id="surface1">
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 9.172656 9.920656 L -9.175 9.920656 C -9.585156 9.920656 -9.921094 
9.588625 -9.921094 9.174563 L -9.921094 -9.173094 C -9.921094 -9.587156 
-9.585156 -9.923094 -9.175 -9.923094 L 9.172656 -9.923094 C 9.586719 -9.923094 
9.922656 -9.587156 9.922656 -9.173094 L 9.922656 9.174563 C 9.922656 9.588625 
9.586719 9.920656 9.172656 9.920656 Z M 9.172656 9.920656 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<g style="fill:rgb(19.999695%,19.999695%,19.999695%);fill-opacity:1;">
-  <use xlink:href="#glyph0-1" x="64.867" y="13.84"/>
-</g>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -47.51875 9.920656 L -65.866406 9.920656 C -66.280469 9.920656 -66.616406 
9.588625 -66.616406 9.174563 L -66.616406 -9.173094 C -66.616406 -9.587156 
-66.280469 -9.923094 -65.866406 -9.923094 L -47.51875 -9.923094 C -47.108594 
-9.923094 -46.772656 -9.587156 -46.772656 -9.173094 L -46.772656 9.174563 C 
-46.772656 9.588625 -47.108594 9.920656 -47.51875 9.920656 Z M -47.51875 
9.920656 " transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<g style="fill:rgb(19.999695%,19.999695%,19.999695%);fill-opacity:1;">
-  <use xlink:href="#glyph0-2" x="5.397" y="13.251"/>
-</g>
-<g style="fill:rgb(19.999695%,19.999695%,19.999695%);fill-opacity:1;">
-  <use xlink:href="#glyph1-1" x="13.395" y="14.429"/>
-</g>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 65.867969 9.920656 L 47.520313 9.920656 C 47.10625 9.920656 46.770313 
9.588625 46.770313 9.174563 L 46.770313 -9.173094 C 46.770313 -9.587156 
47.10625 -9.923094 47.520313 -9.923094 L 65.867969 -9.923094 C 66.282031 
-9.923094 66.614063 -9.587156 66.614063 -9.173094 L 66.614063 9.174563 C 
66.614063 9.588625 66.282031 9.920656 65.867969 9.920656 Z M 65.867969 9.920656 
" transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<g style="fill:rgb(19.999695%,19.999695%,19.999695%);fill-opacity:1;">
-  <use xlink:href="#glyph0-2" x="118.782" y="13.251"/>
-</g>
-<g style="fill:rgb(19.999695%,19.999695%,19.999695%);fill-opacity:1;">
-  <use xlink:href="#glyph1-2" x="126.78" y="14.429"/>
-</g>
-<path 
style="fill:none;stroke-width:0.59776;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-dasharray:0.59776,1.99255;stroke-miterlimit:10;"
 d="M 0.00078125 -11.016844 L 0.00078125 -144.669187 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill:none;stroke-width:0.59776;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-dasharray:0.59776,1.99255;stroke-miterlimit:10;"
 d="M -56.694531 -11.016844 L -56.694531 -144.669187 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill:none;stroke-width:0.59776;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-dasharray:0.59776,1.99255;stroke-miterlimit:10;"
 d="M 56.692188 -11.016844 L 56.692188 -144.669187 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 0.66875 -17.009031 L -0.671094 -17.009031 C -1.08125 -17.009031 -1.417187 
-17.341062 -1.417187 -17.755125 L -1.417187 -21.930906 C -1.417187 -22.341062 
-1.08125 -22.677 -0.671094 -22.677 L 0.66875 -22.677 C 1.082813 -22.677 1.41875 
-22.341062 1.41875 -21.930906 L 1.41875 -17.755125 C 1.41875 -17.341062 
1.082813 -17.009031 0.66875 -17.009031 Z M 0.66875 -17.009031 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -56.022656 -34.016844 L -57.3625 -34.016844 C -57.776562 -34.016844 
-58.1125 -34.348875 -58.1125 -34.762937 L -58.1125 -44.606687 C -58.1125 
-45.02075 -57.776562 -45.352781 -57.3625 -45.352781 L -56.022656 -45.352781 C 
-55.6125 -45.352781 -55.276562 -45.02075 -55.276562 -44.606687 L -55.276562 
-34.762937 C -55.276562 -34.348875 -55.6125 -34.016844 -56.022656 -34.016844 Z 
M -56.022656 -34.016844 " transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 57.364063 -34.016844 L 56.024219 -34.016844 C 55.610156 -34.016844 
55.278125 -34.348875 55.278125 -34.762937 L 55.278125 -44.606687 C 55.278125 
-45.02075 55.610156 -45.352781 56.024219 -45.352781 L 57.364063 -45.352781 C 
57.778125 -45.352781 58.110156 -45.02075 58.110156 -44.606687 L 58.110156 
-34.762937 C 58.110156 -34.348875 57.778125 -34.016844 57.364063 -34.016844 Z M 
57.364063 -34.016844 " transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(12.998962%,28.999329%,52.999878%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 0.492969 -24.169187 L 53.633594 -32.141844 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(12.998962%,28.999329%,52.999878%);fill-opacity:1;"
 d="M 124.125 43.597656 L 120.328125 40.929688 L 121.558594 43.210938 L 
119.714844 45.035156 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(12.998962%,28.999329%,52.999878%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -0.491406 -24.169187 L -53.635937 -32.141844 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(12.998962%,28.999329%,52.999878%);fill-opacity:1;"
 d="M 11.726562 43.597656 L 16.136719 45.035156 L 14.289062 43.210938 L 
15.519531 40.929688 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(30.999756%,59.999084%,1.998901%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 3.059375 -54.817625 L 56.2 -46.844969 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(30.999756%,59.999084%,1.998901%);fill-opacity:1;"
 d="M 68.417969 66.273438 L 72.832031 67.710938 L 70.984375 65.890625 L 
72.214844 63.609375 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(30.999756%,59.999084%,1.998901%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -3.057812 -54.817625 L -56.198437 -46.844969 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(30.999756%,59.999084%,1.998901%);fill-opacity:1;"
 d="M 67.433594 66.273438 L 63.636719 63.609375 L 64.867188 65.890625 L 
63.019531 67.710938 "/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 0.66875 -56.692625 L -0.671094 -56.692625 C -1.08125 -56.692625 -1.417187 
-57.028562 -1.417187 -57.442625 L -1.417187 -61.6145 C -1.417187 -62.028562 
-1.08125 -62.3645 -0.671094 -62.3645 L 0.66875 -62.3645 C 1.082813 -62.3645 
1.41875 -62.028562 1.41875 -61.6145 L 1.41875 -57.442625 C 1.41875 -57.028562 
1.082813 -56.692625 0.66875 -56.692625 Z M 0.66875 -56.692625 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -56.022656 -73.700437 L -57.3625 -73.700437 C -57.776562 -73.700437 
-58.1125 -74.036375 -58.1125 -74.450437 L -58.1125 -84.294187 C -58.1125 
-84.704344 -57.776562 -85.040281 -57.3625 -85.040281 L -56.022656 -85.040281 C 
-55.6125 -85.040281 -55.276562 -84.704344 -55.276562 -84.294187 L -55.276562 
-74.450437 C -55.276562 -74.036375 -55.6125 -73.700437 -56.022656 -73.700437 Z 
M -56.022656 -73.700437 " transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 57.364063 -73.700437 L 56.024219 -73.700437 C 55.610156 -73.700437 
55.278125 -74.036375 55.278125 -74.450437 L 55.278125 -84.294187 C 55.278125 
-84.704344 55.610156 -85.040281 56.024219 -85.040281 L 57.364063 -85.040281 C 
57.778125 -85.040281 58.110156 -84.704344 58.110156 -84.294187 L 58.110156 
-74.450437 C 58.110156 -74.036375 57.778125 -73.700437 57.364063 -73.700437 Z M 
57.364063 -73.700437 " transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(12.998962%,28.999329%,52.999878%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 0.492969 -63.852781 L 53.633594 -71.825437 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(12.998962%,28.999329%,52.999878%);fill-opacity:1;"
 d="M 124.125 83.285156 L 120.328125 80.617188 L 121.558594 82.898438 L 
119.714844 84.71875 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(12.998962%,28.999329%,52.999878%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -0.491406 -63.852781 L -53.635937 -71.825437 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(12.998962%,28.999329%,52.999878%);fill-opacity:1;"
 d="M 11.726562 83.285156 L 16.136719 84.71875 L 14.289062 82.898438 L 
15.519531 80.617188 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(30.999756%,59.999084%,1.998901%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 3.059375 -94.505125 L 56.2 -86.532469 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(30.999756%,59.999084%,1.998901%);fill-opacity:1;"
 d="M 68.417969 105.960938 L 72.832031 107.398438 L 70.984375 105.578125 L 
72.214844 103.292969 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(30.999756%,59.999084%,1.998901%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -3.057812 -94.505125 L -56.198437 -86.532469 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(30.999756%,59.999084%,1.998901%);fill-opacity:1;"
 d="M 67.433594 105.960938 L 63.636719 103.292969 L 64.867188 105.578125 L 
63.019531 107.398438 "/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 0.66875 -96.380125 L -0.671094 -96.380125 C -1.08125 -96.380125 -1.417187 
-96.712156 -1.417187 -97.126219 L -1.417187 -101.302 C -1.417187 -101.712156 
-1.08125 -102.048094 -0.671094 -102.048094 L 0.66875 -102.048094 C 1.082813 
-102.048094 1.41875 -101.712156 1.41875 -101.302 L 1.41875 -97.126219 C 1.41875 
-96.712156 1.082813 -96.380125 0.66875 -96.380125 Z M 0.66875 -96.380125 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -56.022656 -113.387937 L -57.3625 -113.387937 C -57.776562 -113.387937 
-58.1125 -113.719969 -58.1125 -114.134031 L -58.1125 -123.977781 C -58.1125 
-124.391844 -57.776562 -124.727781 -57.3625 -124.727781 L -56.022656 
-124.727781 C -55.6125 -124.727781 -55.276562 -124.391844 -55.276562 
-123.977781 L -55.276562 -114.134031 C -55.276562 -113.719969 -55.6125 
-113.387937 -56.022656 -113.387937 Z M -56.022656 -113.387937 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 57.364063 -113.387937 L 56.024219 -113.387937 C 55.610156 -113.387937 
55.278125 -113.719969 55.278125 -114.134031 L 55.278125 -123.977781 C 55.278125 
-124.391844 55.610156 -124.727781 56.024219 -124.727781 L 57.364063 -124.727781 
C 57.778125 -124.727781 58.110156 -124.391844 58.110156 -123.977781 L 58.110156 
-114.134031 C 58.110156 -113.719969 57.778125 -113.387937 57.364063 -113.387937 
Z M 57.364063 -113.387937 " transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(12.998962%,28.999329%,52.999878%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 0.492969 -103.540281 L 53.633594 -111.512937 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(12.998962%,28.999329%,52.999878%);fill-opacity:1;"
 d="M 124.125 122.96875 L 120.328125 120.300781 L 121.558594 122.585938 L 
119.714844 124.40625 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(12.998962%,28.999329%,52.999878%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -0.491406 -103.540281 L -53.635937 -111.512937 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(12.998962%,28.999329%,52.999878%);fill-opacity:1;"
 d="M 11.726562 122.96875 L 16.136719 124.40625 L 14.289062 122.585938 L 
15.519531 120.300781 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(30.999756%,59.999084%,1.998901%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 3.059375 -134.188719 L 56.2 -126.216062 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(30.999756%,59.999084%,1.998901%);fill-opacity:1;"
 d="M 68.417969 145.644531 L 72.832031 147.082031 L 70.984375 145.261719 L 
72.214844 142.980469 "/>
-<path 
style="fill:none;stroke-width:0.79701;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(30.999756%,59.999084%,1.998901%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M -3.057812 -134.188719 L -56.198437 -126.216062 " 
transform="matrix(1,0,0,-1,67.925,11.073)"/>
-<path style=" 
stroke:none;fill-rule:nonzero;fill:rgb(30.999756%,59.999084%,1.998901%);fill-opacity:1;"
 d="M 67.433594 145.644531 L 63.636719 142.980469 L 64.867188 145.261719 L 
63.019531 147.082031 "/>
-<path 
style="fill-rule:nonzero;fill:rgb(94.999695%,94.999695%,94.999695%);fill-opacity:1;stroke-width:1.19553;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(19.999695%,19.999695%,19.999695%);stroke-opacity:1;stroke-miterlimit:10;"
 d="M 0.66875 -136.063719 L -0.671094 -136.063719 C -1.08125 -136.063719 
-1.417187 -136.399656 -1.417187 -136.813719 L -1.417187 -140.985594 C -1.417187 
-141.399656 -1.08125 -141.735594 -0.671094 -141.735594 L 0.66875 -141.735594 C 
1.082813 -141.735594 1.41875 -141.399656 1.41875 -140.985594 L 1.41875 
-136.813719 C 1.41875 -136.399656 1.082813 -136.063719 0.66875 -136.063719 Z M 
0.66875 -136.063719 " transform="matrix(1,0,0,-1,67.925,11.073)"/>
-</g>
-</svg>
