Posted to commits@crail.apache.org by ps...@apache.org on 2018/02/21 15:09:07 UTC

incubator-crail-website git commit: Publishing from af165cf7ac262c54804afbc762ce15bdf29cb015

Repository: incubator-crail-website
Updated Branches:
  refs/heads/asf-site 77142dc2f -> 8a329e79e


Publishing from af165cf7ac262c54804afbc762ce15bdf29cb015


Project: http://git-wip-us.apache.org/repos/asf/incubator-crail-website/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-crail-website/commit/8a329e79
Tree: http://git-wip-us.apache.org/repos/asf/incubator-crail-website/tree/8a329e79
Diff: http://git-wip-us.apache.org/repos/asf/incubator-crail-website/diff/8a329e79

Branch: refs/heads/asf-site
Commit: 8a329e79e722aedb859fb84027955e4901ab4cc9
Parents: 77142dc
Author: Patrick Stuedi <st...@zurich.ibm.com>
Authored: Wed Feb 21 16:04:51 2018 +0100
Committer: Patrick Stuedi <st...@zurich.ibm.com>
Committed: Wed Feb 21 16:04:51 2018 +0100

----------------------------------------------------------------------
 content/Gemfile      |   1 -
 content/Gemfile.lock | 212 ++++++----------------------------------------
 content/feed.xml     | 201 +++++++++++++++++++++++++++++++++++--------
 3 files changed, 191 insertions(+), 223 deletions(-)
----------------------------------------------------------------------
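
For reference, the stat above can be reproduced locally from the two revisions
listed under "Updated Branches" (a sketch, assuming a local clone of the
repository with the asf-site branch fetched):

    git diff --stat 77142dc2f 8a329e79e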


http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/8a329e79/content/Gemfile
----------------------------------------------------------------------
diff --git a/content/Gemfile b/content/Gemfile
index 587c0ac..d204088 100644
--- a/content/Gemfile
+++ b/content/Gemfile
@@ -1,5 +1,4 @@
 source 'https://rubygems.org'
-gem 'github-pages'
 gem 'rouge'
 gem 'jekyll-paginate'
 gem 'jekyll-feed'
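
The change above drops the github-pages meta-gem, so the remaining gems are
resolved directly against rubygems.org. A minimal way to regenerate the
lockfile after such a Gemfile change (a sketch, assuming Bundler is installed
and the commands are run from the repository root):

    cd content
    bundle install            # re-resolves the gems and rewrites Gemfile.lock
    bundle exec jekyll build  # rebuilds the site with the new dependency set

The large Gemfile.lock diff below is the result of exactly this re-resolution:
the github-pages pin set disappears and jekyll itself moves from 3.6.2 to 3.7.2.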

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/8a329e79/content/Gemfile.lock
----------------------------------------------------------------------
diff --git a/content/Gemfile.lock b/content/Gemfile.lock
index ad43d03..83e9e2e 100644
--- a/content/Gemfile.lock
+++ b/content/Gemfile.lock
@@ -1,206 +1,53 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    activesupport (4.2.9)
-      i18n (~> 0.7)
-      minitest (~> 5.1)
-      thread_safe (~> 0.3, >= 0.3.4)
-      tzinfo (~> 1.1)
     addressable (2.5.2)
       public_suffix (>= 2.0.2, < 4.0)
-    coffee-script (2.4.1)
-      coffee-script-source
-      execjs
-    coffee-script-source (1.11.1)
     colorator (1.1.0)
-    commonmarker (0.17.7.1)
-      ruby-enum (~> 0.5)
     concurrent-ruby (1.0.5)
-    ethon (0.11.0)
-      ffi (>= 1.3.0)
-    execjs (2.7.0)
+    em-websocket (0.5.1)
+      eventmachine (>= 0.12.9)
+      http_parser.rb (~> 0.6.0)
+    eventmachine (1.2.5)
     faraday (0.14.0)
       multipart-post (>= 1.2, < 3)
-    ffi (1.9.18)
+    ffi (1.9.21)
     forwardable-extended (2.6.0)
-    gemoji (3.0.0)
-    github-pages (172)
-      activesupport (= 4.2.9)
-      github-pages-health-check (= 1.3.5)
-      jekyll (= 3.6.2)
-      jekyll-avatar (= 0.5.0)
-      jekyll-coffeescript (= 1.0.2)
-      jekyll-commonmark-ghpages (= 0.1.3)
-      jekyll-default-layout (= 0.1.4)
-      jekyll-feed (= 0.9.2)
-      jekyll-gist (= 1.4.1)
-      jekyll-github-metadata (= 2.9.3)
-      jekyll-mentions (= 1.2.0)
-      jekyll-optional-front-matter (= 0.3.0)
-      jekyll-paginate (= 1.1.0)
-      jekyll-readme-index (= 0.2.0)
-      jekyll-redirect-from (= 0.12.1)
-      jekyll-relative-links (= 0.5.2)
-      jekyll-remote-theme (= 0.2.3)
-      jekyll-sass-converter (= 1.5.0)
-      jekyll-seo-tag (= 2.3.0)
-      jekyll-sitemap (= 1.1.1)
-      jekyll-swiss (= 0.4.0)
-      jekyll-theme-architect (= 0.1.0)
-      jekyll-theme-cayman (= 0.1.0)
-      jekyll-theme-dinky (= 0.1.0)
-      jekyll-theme-hacker (= 0.1.0)
-      jekyll-theme-leap-day (= 0.1.0)
-      jekyll-theme-merlot (= 0.1.0)
-      jekyll-theme-midnight (= 0.1.0)
-      jekyll-theme-minimal (= 0.1.0)
-      jekyll-theme-modernist (= 0.1.0)
-      jekyll-theme-primer (= 0.5.2)
-      jekyll-theme-slate (= 0.1.0)
-      jekyll-theme-tactile (= 0.1.0)
-      jekyll-theme-time-machine (= 0.1.0)
-      jekyll-titles-from-headings (= 0.5.0)
-      jemoji (= 0.8.1)
-      kramdown (= 1.14.0)
-      liquid (= 4.0.0)
-      listen (= 3.0.6)
-      mercenary (~> 0.3)
-      minima (= 2.1.1)
-      rouge (= 2.2.1)
-      terminal-table (~> 1.4)
-    github-pages-health-check (1.3.5)
-      addressable (~> 2.3)
-      net-dns (~> 0.8)
-      octokit (~> 4.0)
-      public_suffix (~> 2.0)
-      typhoeus (~> 0.7)
-    html-pipeline (2.7.1)
-      activesupport (>= 2)
-      nokogiri (>= 1.4)
-    i18n (0.9.1)
+    http_parser.rb (0.6.0)
+    i18n (0.9.4)
       concurrent-ruby (~> 1.0)
-    jekyll (3.6.2)
+    jekyll (3.7.2)
       addressable (~> 2.4)
       colorator (~> 1.0)
+      em-websocket (~> 0.5)
+      i18n (~> 0.7)
       jekyll-sass-converter (~> 1.0)
-      jekyll-watch (~> 1.1)
+      jekyll-watch (~> 2.0)
       kramdown (~> 1.14)
       liquid (~> 4.0)
       mercenary (~> 0.3.3)
       pathutil (~> 0.9)
-      rouge (>= 1.7, < 3)
+      rouge (>= 1.7, < 4)
       safe_yaml (~> 1.0)
-    jekyll-avatar (0.5.0)
-      jekyll (~> 3.0)
-    jekyll-coffeescript (1.0.2)
-      coffee-script (~> 2.2)
-      coffee-script-source (~> 1.11.1)
-    jekyll-commonmark (1.1.0)
-      commonmarker (~> 0.14)
-      jekyll (>= 3.0, < 4.0)
-    jekyll-commonmark-ghpages (0.1.3)
-      commonmarker (~> 0.17.6)
-      jekyll-commonmark (~> 1)
-      rouge (~> 2)
-    jekyll-default-layout (0.1.4)
-      jekyll (~> 3.0)
-    jekyll-feed (0.9.2)
-      jekyll (~> 3.3)
-    jekyll-gist (1.4.1)
+    jekyll-feed (0.3.1)
+    jekyll-gist (1.5.0)
       octokit (~> 4.2)
-    jekyll-github-metadata (2.9.3)
-      jekyll (~> 3.1)
-      octokit (~> 4.0, != 4.4.0)
-    jekyll-mentions (1.2.0)
-      activesupport (~> 4.0)
-      html-pipeline (~> 2.3)
-      jekyll (~> 3.0)
     jekyll-oembed (0.0.1)
       jekyll
       ruby-oembed (= 0.8.8)
-    jekyll-optional-front-matter (0.3.0)
-      jekyll (~> 3.0)
     jekyll-paginate (1.1.0)
-    jekyll-readme-index (0.2.0)
-      jekyll (~> 3.0)
-    jekyll-redirect-from (0.12.1)
-      jekyll (~> 3.3)
-    jekyll-relative-links (0.5.2)
-      jekyll (~> 3.3)
-    jekyll-remote-theme (0.2.3)
-      jekyll (~> 3.5)
-      rubyzip (>= 1.2.1, < 3.0)
-      typhoeus (>= 0.7, < 2.0)
-    jekyll-sass-converter (1.5.0)
-      sass (~> 3.4)
-    jekyll-seo-tag (2.3.0)
-      jekyll (~> 3.3)
-    jekyll-sitemap (1.1.1)
-      jekyll (~> 3.3)
-    jekyll-swiss (0.4.0)
-    jekyll-theme-architect (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-cayman (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-dinky (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-hacker (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-leap-day (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-merlot (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-midnight (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-minimal (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-modernist (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-primer (0.5.2)
-      jekyll (~> 3.5)
-      jekyll-github-metadata (~> 2.9)
-      jekyll-seo-tag (~> 2.2)
-    jekyll-theme-slate (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-tactile (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-theme-time-machine (0.1.0)
-      jekyll (~> 3.5)
-      jekyll-seo-tag (~> 2.0)
-    jekyll-titles-from-headings (0.5.0)
-      jekyll (~> 3.3)
-    jekyll-watch (1.5.1)
+    jekyll-sass-converter (1.3.0)
+      sass (~> 3.2)
+    jekyll-watch (2.0.0)
       listen (~> 3.0)
-    jemoji (0.8.1)
-      activesupport (~> 4.0, >= 4.2.9)
-      gemoji (~> 3.0)
-      html-pipeline (~> 2.2)
-      jekyll (>= 3.0)
-    kramdown (1.14.0)
+    kramdown (1.16.2)
     liquid (4.0.0)
-    listen (3.0.6)
-      rb-fsevent (>= 0.9.3)
-      rb-inotify (>= 0.9.7)
+    listen (3.1.5)
+      rb-fsevent (~> 0.9, >= 0.9.4)
+      rb-inotify (~> 0.9, >= 0.9.7)
+      ruby_dep (~> 1.2)
     mercenary (0.3.6)
-    mini_portile2 (2.3.0)
-    minima (2.1.1)
-      jekyll (~> 3.3)
-    minitest (5.11.1)
     multipart-post (2.0.0)
-    net-dns (0.8.0)
-    nokogiri (1.8.1)
-      mini_portile2 (~> 2.3.0)
     octokit (4.8.0)
       sawyer (~> 0.8.0, >= 0.5.3)
     pathutil (0.16.1)
@@ -209,11 +56,9 @@ GEM
     rb-fsevent (0.10.2)
     rb-inotify (0.9.10)
       ffi (>= 0.5.0, < 2)
-    rouge (2.2.1)
-    ruby-enum (0.7.1)
-      i18n
+    rouge (3.1.1)
     ruby-oembed (0.8.8)
-    rubyzip (1.2.1)
+    ruby_dep (1.5.0)
     safe_yaml (1.0.4)
     sass (3.5.5)
       sass-listen (~> 4.0.0)
@@ -223,20 +68,11 @@ GEM
     sawyer (0.8.1)
       addressable (>= 2.3.5, < 2.6)
       faraday (~> 0.8, < 1.0)
-    terminal-table (1.8.0)
-      unicode-display_width (~> 1.1, >= 1.1.1)
-    thread_safe (0.3.6)
-    typhoeus (0.8.0)
-      ethon (>= 0.8.0)
-    tzinfo (1.2.4)
-      thread_safe (~> 0.1)
-    unicode-display_width (1.3.0)
 
 PLATFORMS
   ruby
 
 DEPENDENCIES
-  github-pages
   jekyll-feed
   jekyll-gist
   jekyll-oembed
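
With the lockfile re-resolved, the versions Bundler actually loads can be
cross-checked against the specs above (a sketch, assuming the commands are run
from the content/ directory; the expected version comes from the jekyll entry
in the lockfile):

    bundle exec jekyll --version    # expected: jekyll 3.7.2
    bundle list | grep jekyll       # lists the resolved jekyll-* gems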

http://git-wip-us.apache.org/repos/asf/incubator-crail-website/blob/8a329e79/content/feed.xml
----------------------------------------------------------------------
diff --git a/content/feed.xml b/content/feed.xml
index 99a0059..faca4b0 100644
--- a/content/feed.xml
+++ b/content/feed.xml
@@ -1,4 +1,40 @@
-<?xml version="1.0" encoding="utf-8"?><feed xmlns="http://www.w3.org/2005/Atom" ><generator uri="https://jekyllrb.com/" version="3.6.2">Jekyll</generator><link href="http://crail.incubator.apache.org//feed.xml" rel="self" type="application/atom+xml" /><link href="http://crail.incubator.apache.org//" rel="alternate" type="text/html" /><updated>2018-02-21T15:31:52+01:00</updated><id>http://crail.incubator.apache.org//</id><title type="html">The Apache Crail (Incubating) Project</title><entry><title type="html">Apache</title><link href="http://crail.incubator.apache.org//blog/2018/01/apache.html" rel="alternate" type="text/html" title="Apache" /><published>2018-01-22T00:00:00+01:00</published><updated>2018-01-22T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2018/01/apache</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2018/01/apache.html">&lt;p&gt;Crail is now an Apache Incubator project!&lt;/p&gt;</content><author><name></name></author><category term="news" /><summary type="html">Crail is now an Apache Incubator project!</summary></entry><entry><title type="html">Iops</title><link href="http://crail.incubator.apache.org//blog/2017/11/iops.html" rel="alternate" type="text/html" title="Iops" /><published>2017-11-23T00:00:00+01:00</published><updated>2017-11-23T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/iops</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/iops.html">&lt;p&gt;New blog &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/11/crail-metadata.html&quot;&gt;post&lt;/a&gt; about Crail’s metadata performance and scalability&lt;/p&gt;</content><author><name></name></author><category term="news" /><summary type="html">New blog post about Crail’s metadata performance and scalability</summary></entry><entry><title type="html">Crail Storage Performance – Part III: Metadata</title><link href="http://crail.incubator.apache.org//blog/2017/11/crail-metadata.html" rel="alternate" type="text/html" title="Crail Storage Performance -- Part III: Metadata" /><published>2017-11-21T00:00:00+01:00</published><updated>2017-11-21T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/crail-metadata</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/crail-metadata.html">&lt;div style=&quot;text-align: justify&quot;&gt;
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom">
+<generator uri="http://jekyllrb.com" version="3.7.2">Jekyll</generator>
+<link href="http://crail.incubator.apache.org//feed.xml" rel="self" type="application/atom+xml" />
+<link href="http://crail.incubator.apache.org//" rel="alternate" type="text/html" />
+<updated>2018-02-21T16:04:49+01:00</updated>
+<id>http://crail.incubator.apache.org//</id>
+<title>The Apache Crail (Incubating) Project</title>
+<entry>
+<title>Apache</title>
+<link href="http://crail.incubator.apache.org//blog/2018/01/apache.html" rel="alternate" type="text/html" title="Apache" />
+<published>2018-01-22T00:00:00+01:00</published>
+<updated>2018-01-22T00:00:00+01:00</updated>
+<id>http://crail.incubator.apache.org//blog/2018/01/apache</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2018/01/apache.html">&lt;p&gt;Crail is now an Apache Incubator project!&lt;/p&gt;
+</content>
+<category term="news" />
+<summary>Crail is now an Apache Incubator project!</summary>
+</entry>
+<entry>
+<title>Iops</title>
+<link href="http://crail.incubator.apache.org//blog/2017/11/iops.html" rel="alternate" type="text/html" title="Iops" />
+<published>2017-11-23T00:00:00+01:00</published>
+<updated>2017-11-23T00:00:00+01:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/11/iops</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/iops.html">&lt;p&gt;New blog &lt;a href=&quot;{{ site.base }}/blog/2017/11/crail-metadata.html&quot;&gt;post&lt;/a&gt; about Crail’s metadata performance and scalability&lt;/p&gt;
+</content>
+<category term="news" />
+<summary>New blog post about Crail’s metadata performance and scalability</summary>
+</entry>
+<entry>
+<title>Crail Storage Performance – Part III: Metadata</title>
+<link href="http://crail.incubator.apache.org//blog/2017/11/crail-metadata.html" rel="alternate" type="text/html" title="Crail Storage Performance -- Part III: Metadata" />
+<published>2017-11-21T00:00:00+01:00</published>
+<updated>2017-11-21T00:00:00+01:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/11/crail-metadata</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/crail-metadata.html">&lt;div style=&quot;text-align: justify&quot;&gt;
 &lt;p&gt;
 This is part III of our series of posts discussing Crail's raw storage performance. This part is about Crail's metadata performance and scalability.
 &lt;/p&gt;
@@ -33,11 +69,11 @@ This is part III of our series of posts discussing Crail's raw storage performan
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
-As described in &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/08/crail-memory.html&quot;&gt;part I&lt;/a&gt;, Crail data operations are composed of actual data transfers and metadata operations. Examples of metadata operations are operations for creating or modifying the state of a file, or operations to look up the storage server that stores a particular range (block) of a file. In Crail, all the metadata is managed by the namenode(s) (as opposed to the data which is managed by the storage nodes). Clients interact with Crail namenodes via Remote Procedure Calls (RPCs). Crail supports multiple RPC protocols for different types of networks and also offers a pluggable RPC interface so that new RPC bindings can be implemented easily. On RDMA networks, the default DaRPC (&lt;a href=&quot;https://dl.acm.org/citation.cfm?id=2670994&quot;&gt;DaRPC paper&lt;/a&gt;, &lt;a href=&quot;http://github.com/zrlio/darpc&quot;&gt;DaRPC GitHub&lt;/a&gt;) based RPC binding provides the best performance. The figure below gives an overview of the Crail metadata processing in a DaRPC configuration. 
+As described in &lt;a href=&quot;{{ site.base }}/blog/2017/08/crail-memory.html&quot;&gt;part I&lt;/a&gt;, Crail data operations are composed of actual data transfers and metadata operations. Examples of metadata operations are operations for creating or modifying the state of a file, or operations to look up the storage server that stores a particular range (block) of a file. In Crail, all the metadata is managed by the namenode(s) (as opposed to the data which is managed by the storage nodes). Clients interact with Crail namenodes via Remote Procedure Calls (RPCs). Crail supports multiple RPC protocols for different types of networks and also offers a pluggable RPC interface so that new RPC bindings can be implemented easily. On RDMA networks, the default DaRPC (&lt;a href=&quot;https://dl.acm.org/citation.cfm?id=2670994&quot;&gt;DaRPC paper&lt;/a&gt;, &lt;a href=&quot;http://github.com/zrlio/darpc&quot;&gt;DaRPC GitHub&lt;/a&gt;) based RPC binding provides the best performance. The figure below gives an overview of the Crail metadata processing in a DaRPC configuration. 
 &lt;/p&gt;
 &lt;/div&gt;
 
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-metadata/rpc.png&quot; width=&quot;480&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-metadata/rpc.png&quot; width=&quot;480&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
@@ -50,10 +86,10 @@ Crail supports partitioning of metadata across several namenodes. Thereby, metada
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
-In two of the previous blogs (&lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt;, &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/08/crail-nvme-fabrics-v1.html&quot;&gt;NVMf&lt;/a&gt;) we have already shown that Crail metadata operations are very low latency. Essentially a single metadata operation issued by a remote client takes 5-6 microseconds, which is only slightly more than the raw network latency of the RDMA network fabric. In this blog, we want to explore the scalability of Crail's metadata management, that is, the number of clients Crail can support, or how Crail scales as the cluster size increases. The level of scalability of Crail is mainly determined by the number of metadata operations Crail can process concurrently, a metric that is often referred to as IOPS. The higher the number of IOPS the system can handle, the more clients can concurrently use Crail without performance loss. 
+In two of the previous blogs (&lt;a href=&quot;{{ site.base }}/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt;, &lt;a href=&quot;{{ site.base }}/blog/2017/08/crail-nvme-fabrics-v1.html&quot;&gt;NVMf&lt;/a&gt;) we have already shown that Crail metadata operations are very low latency. Essentially a single metadata operation issued by a remote client takes 5-6 microseconds, which is only slightly more than the raw network latency of the RDMA network fabric. In this blog, we want to explore the scalability of Crail's metadata management, that is, the number of clients Crail can support, or how Crail scales as the cluster size increases. The level of scalability of Crail is mainly determined by the number of metadata operations Crail can process concurrently, a metric that is often referred to as IOPS. The higher the number of IOPS the system can handle, the more clients can concurrently use Crail without performance loss. 
 &lt;/p&gt;
 &lt;p&gt;
-An important metadata operation is ''getFile()'', which is used by clients to look up the status of a file (whether the file exists, what size it has, etc.). The ''getFile()'' operation is served by Crail's fast lock-free map and in spirit is very similar to the ''getBlock()'' metadata operation (used by clients to query which storage node holds a particular block). In a typical Crail use case, ''getFile()'' and ''getBlock()'' are responsible for the peak metadata load at a namenode. In this experiment, we measure the achievable IOPS on the server side in an artificial configuration with many clients distributed across the cluster issuing ''getFile()'' in a tight loop. Note that the client side RPC interface in Crail is asynchronous, thus, clients can issue multiple metadata operations without blocking while asynchronously waiting for the result. In the experiments below, each client may have a maximum of 128 ''getFile()'' operations outstanding at any point in time. In a practical scenario, Crail clients may also have multiple metadata operations in flight either because clients are shared by different cores, or because Crail interleaves metadata and data operations (see &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt;). What makes the benchmark artificial is that clients exclusively focus on generating load for the namenode and thereby are neither performing data operations nor are they doing any compute. The basic command of the benchmark as executed by each of the individual clients is given by the following command:
+An important metadata operation is ''getFile()'', which is used by clients to look up the status of a file (whether the file exists, what size it has, etc.). The ''getFile()'' operation is served by Crail's fast lock-free map and in spirit is very similar to the ''getBlock()'' metadata operation (used by clients to query which storage node holds a particular block). In a typical Crail use case, ''getFile()'' and ''getBlock()'' are responsible for the peak metadata load at a namenode. In this experiment, we measure the achievable IOPS on the server side in an artificial configuration with many clients distributed across the cluster issuing ''getFile()'' in a tight loop. Note that the client side RPC interface in Crail is asynchronous, thus, clients can issue multiple metadata operations without blocking while asynchronously waiting for the result. In the experiments below, each client may have a maximum of 128 ''getFile()'' operations outstanding at any point in time. In a practical scenario, Crail clients may also have multiple metadata operations in flight either because clients are shared by different cores, or because Crail interleaves metadata and data operations (see &lt;a href=&quot;{{ site.base }}/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt;). What makes the benchmark artificial is that clients exclusively focus on generating load for the namenode and thereby are neither performing data operations nor are they doing any compute. The basic command of the benchmark as executed by each of the individual clients is given by the following command:
 &lt;/p&gt;
 &lt;/div&gt;
 &lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div class=&quot;highlight&quot;&gt;&lt;pre class=&quot;highlight&quot;&gt;&lt;code&gt;./bin/crail iobench -t getMultiFileAsync -f / -k 10000000 -b 128
@@ -96,7 +132,7 @@ The line of the raw number of IOPS, labeled ''ib send'' is shown in the same gra
 &lt;/p&gt;
 &lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-metadata/namenode_ibsend_iops64.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-metadata/namenode_ibsend_iops64.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
@@ -113,7 +149,7 @@ To increase the number of IOPS the overall system can handle, we allow starting
 &lt;/p&gt;
 &lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-metadata/namenode_multi64.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-metadata/namenode_multi64.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
@@ -141,7 +177,7 @@ namenodes happening, which should lead to linear scalability.
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
 Let us look at a concrete application, which ideally runs on a large cluster:
-TeraSort. In a previous blog, &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/01/sorting.html&quot;&gt;sorting&lt;/a&gt;,
+TeraSort. In a previous blog, &lt;a href=&quot;{{ site.base }}/blog/2017/01/sorting.html&quot;&gt;sorting&lt;/a&gt;,
 we analyze performance characteristics of TeraSort on Crail on a big cluster
 of 128 nodes, where we run 384 executors in total. This already proves that
 Crail can at least handle 384 clients. Now we analyze the theoretical number
@@ -175,7 +211,7 @@ namenode over the elapsed runtime of the TeraSort application.
 &lt;/div&gt;
 
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-metadata/terasort_iops.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-metadata/terasort_iops.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
@@ -339,7 +375,7 @@ plot shows the number of IOPS relative to the number of clients.
 &lt;/div&gt;
 
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-metadata/namenode_hdfs_iops.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-metadata/namenode_hdfs_iops.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt;
@@ -393,7 +429,7 @@ the blog with the latest numbers as soon as the bug is fixed.
 &lt;/div&gt;
 
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-metadata/ramcloud_iops.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-metadata/ramcloud_iops.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt;
@@ -424,7 +460,7 @@ of 30Mio/s with 4 namenodes).
 &lt;/div&gt;
 
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-metadata/max_iops_crail_hdfs_ramcloud.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-metadata/max_iops_crail_hdfs_ramcloud.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt;
@@ -444,7 +480,44 @@ of operations even compared to a C++-based system like RAMCloud.
 &lt;p&gt;
 In this blog we show three key points of Crail: First, Crail's namenode performs the same as ib_send_bw with realistic parameters in terms of IOPS. This shows that the actual processing of the RPC is implemented efficiently. Second, with only one namenode, Crail performs 10x to 50x better than RAMCloud and HDFS, two popular systems, where RAMCloud is RDMA-based and implemented natively. Third, Crail's metadata service can be scaled out to serve a large number of clients. We have shown that Crail offers near-linear scaling with up to 4 namenodes, offering a performance that is sufficient to serve several thousand clients. 
 &lt;/p&gt;
-&lt;/div&gt;</content><author><name>Adrian Schuepbach and Patrick Stuedi</name></author><category term="blog" /><summary type="html">This is part III of our series of posts discussing Crail's raw storage performance. This part is about Crail's metadata performance and scalability.</summary></entry><entry><title type="html">Floss</title><link href="http://crail.incubator.apache.org//blog/2017/11/floss.html" rel="alternate" type="text/html" title="Floss" /><published>2017-11-17T00:00:00+01:00</published><updated>2017-11-17T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/floss</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/floss.html">&lt;p&gt;Crail features in the &lt;a href=&quot;https://twit.tv/shows/floss-weekly/episodes/458?autostart=false&quot;&gt;FLOSS weekly podcast&lt;/a&gt;&lt;/p&gt;</content><author><name></name></author><category term="news" /><summary type="html">Crail features in the FLOSS weekly podcast</summary></entry><entry><title type="html">Blog</title><link href="http://crail.incubator.apache.org//blog/2017/11/blog.html" rel="alternate" type="text/html" title="Blog" /><published>2017-11-17T00:00:00+01:00</published><updated>2017-11-17T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/blog</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/blog.html">&lt;p&gt;New blog &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/11/rdmashuffle.html&quot;&gt;post&lt;/a&gt; about SparkRDMA and Crail shuffle plugins&lt;/p&gt;</content><author><name></name></author><category term="news" /><summary type="html">New blog post about SparkRDMA and Crail shuffle plugins</summary></entry><entry><title type="html">Spark Shuffle: SparkRDMA vs Crail</title><link href="http://crail.incubator.apache.org//blog/2017/11/rdmashuffle.html" rel="alternate" type="text/html" title="Spark Shuffle: SparkRDMA vs Crail" /><published>2017-11-17T00:00:00+01:00</published><updated>2017-11-17T00:00:00+01:00</updated><id>http://crail.incubator.apache.org//blog/2017/11/rdmashuffle</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/rdmashuffle.html">&lt;div style=&quot;text-align: justify&quot;&gt;
+&lt;/div&gt;
+
+</content>
+<author>
+<name>Adrian Schuepbach and Patrick Stuedi</name>
+</author>
+<category term="blog" />
+<summary>This is part III of our series of posts discussing Crail's raw storage performance. This part is about Crail's metadata performance and scalability.</summary>
+</entry>
+<entry>
+<title>Floss</title>
+<link href="http://crail.incubator.apache.org//blog/2017/11/floss.html" rel="alternate" type="text/html" title="Floss" />
+<published>2017-11-17T00:00:00+01:00</published>
+<updated>2017-11-17T00:00:00+01:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/11/floss</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/floss.html">&lt;p&gt;Crail features in the &lt;a href=&quot;https://twit.tv/shows/floss-weekly/episodes/458?autostart=false&quot;&gt;FLOSS weekly podcast&lt;/a&gt;&lt;/p&gt;
+</content>
+<category term="news" />
+<summary>Crail features in the FLOSS weekly podcast</summary>
+</entry>
+<entry>
+<title>Blog</title>
+<link href="http://crail.incubator.apache.org//blog/2017/11/blog.html" rel="alternate" type="text/html" title="Blog" />
+<published>2017-11-17T00:00:00+01:00</published>
+<updated>2017-11-17T00:00:00+01:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/11/blog</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/blog.html">&lt;p&gt;New blog &lt;a href=&quot;{{ site.base }}/blog/2017/11/rdmashuffle.html&quot;&gt;post&lt;/a&gt; about SparkRDMA and Crail shuffle plugins&lt;/p&gt;
+</content>
+<category term="news" />
+<summary>New blog post about SparkRDMA and Crail shuffle plugins</summary>
+</entry>
+<entry>
+<title>Spark Shuffle: SparkRDMA vs Crail</title>
+<link href="http://crail.incubator.apache.org//blog/2017/11/rdmashuffle.html" rel="alternate" type="text/html" title="Spark Shuffle: SparkRDMA vs Crail" />
+<published>2017-11-17T00:00:00+01:00</published>
+<updated>2017-11-17T00:00:00+01:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/11/rdmashuffle</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/11/rdmashuffle.html">&lt;div style=&quot;text-align: justify&quot;&gt;
 &lt;p&gt;
 This blog is comparing the shuffle performance of Crail with SparkRDMA, an alternative RDMA-based shuffle plugin for Spark.
 &lt;/p&gt;
@@ -492,7 +565,7 @@ In contrast, the Crail shuffler plugin takes a more holistic approach and levera
 &lt;p&gt;Let's start by quantitatively assessing performance gains from the Crail shuffle plugin and SparkRDMA. As described above, SparkRDMA can be operated in two different modes. Users decide which mode to use by selecting a particular type of shuffle writer (spark.shuffle.rdma.shuffleWriterMethod). The Wrapper shuffle writer writes shuffle data to files between the stages; the Chunked shuffle writer stores shuffle data in memory. We evaluate both writer methods for terasort and SQL equijoin.
 &lt;/p&gt;
 &lt;/div&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/rdma-shuffle/terasort.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/rdma-shuffle/terasort.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 &lt;div style=&quot;text-align: justify&quot;&gt;
 &lt;p&gt;
@@ -502,7 +575,7 @@ First we run &lt;a href=&quot;https://github.com/zrlio/crail-spark-terasort&quot
 The plot above shows runtimes of the various configurations we run with terasort. SparkRDMA with the Wrapper shuffle writer performs slightly better (3-4%) than vanilla Spark, whereas the Chunked shuffle writer shows a 30% overhead. On a quick inspection we found that this overhead stems from memory allocation and registration for the shuffle data that is kept in memory between the stages. Compared to vanilla Spark, Crail's shuffle plugin shows a performance improvement of around 235%.
 &lt;/p&gt;
 &lt;/div&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/rdma-shuffle/sql.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/rdma-shuffle/sql.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt;
@@ -522,7 +595,22 @@ For our second workload we choose the &lt;a href=&quot;https://github.com/zrlio/
 These benchmarks validate our belief that a &quot;last-mile&quot; integration cannot deliver the same performance gains as a holistic approach, i.e. one has to look at the whole picture of how to integrate RDMA into Spark applications (and for that matter any framework or application). Replacing the data transfer alone does not lead to the anticipated performance increase. We learned this the hard way when we initially started working on Crail.
 &lt;/p&gt;
 
-&lt;/div&gt;</content><author><name>Jonas Pfefferle, Patrick Stuedi, Animesh Trivedi, Bernard Metzler, Adrian Schuepbach</name></author><category term="blog" /><summary type="html">This blog is comparing the shuffle performance of Crail with SparkRDMA, an alternative RDMA-based shuffle plugin for Spark.</summary></entry><entry><title type="html">Crail Storage Performance – Part II: NVMf</title><link href="http://crail.incubator.apache.org//blog/2017/08/crail-nvme-fabrics-v1.html" rel="alternate" type="text/html" title="Crail Storage Performance -- Part II: NVMf" /><published>2017-08-22T00:00:00+02:00</published><updated>2017-08-22T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2017/08/crail-nvme-fabrics-v1</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/08/crail-nvme-fabrics-v1.html">&lt;div style=&quot;text-align: justify&quot;&gt;
+&lt;/div&gt;
+
+</content>
+<author>
+<name>Jonas Pfefferle, Patrick Stuedi, Animesh Trivedi, Bernard Metzler, Adrian Schuepbach</name>
+</author>
+<category term="blog" />
+<summary>This blog is comparing the shuffle performance of Crail with SparkRDMA, an alternative RDMA-based shuffle plugin for Spark.</summary>
+</entry>
+<entry>
+<title>Crail Storage Performance – Part II: NVMf</title>
+<link href="http://crail.incubator.apache.org//blog/2017/08/crail-nvme-fabrics-v1.html" rel="alternate" type="text/html" title="Crail Storage Performance -- Part II: NVMf" />
+<published>2017-08-22T00:00:00+02:00</published>
+<updated>2017-08-22T00:00:00+02:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/08/crail-nvme-fabrics-v1</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/08/crail-nvme-fabrics-v1.html">&lt;div style=&quot;text-align: justify&quot;&gt;
 &lt;p&gt;
 This is part II of our series of posts discussing Crail's raw storage performance. This part is about Crail's NVMe storage tier, a low-latency flash storage backend for Crail completely based on user-level storage access.
 &lt;/p&gt;
@@ -605,7 +693,7 @@ The main take away from this plot is that the time it takes to perform a random
 &lt;/p&gt;
 &lt;/div&gt;
 
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-nvmf/latency.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-nvmf/latency.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
@@ -620,16 +708,16 @@ The second plot shows sequential read and write throughput with a transfer size
 &lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
-For sequential operations in Crail, metadata fetching is inlined with data operations as described in the &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt; blog. This is possible as long as the data transfer has a lower latency than the metadata RPC, which is typically the case. As a consequence, our NVMf storage tier reaches the same throughput as the native SPDK benchmark (device limit).
+For sequential operations in Crail, metadata fetching is inlined with data operations as described in the &lt;a href=&quot;{{ site.base }}/blog/2017/08/crail-memory.html&quot;&gt;DRAM&lt;/a&gt; blog. This is possible as long as the data transfer has a lower latency than the metadata RPC, which is typically the case. As a consequence, our NVMf storage tier reaches the same throughput as the native SPDK benchmark (device limit).
 &lt;/p&gt;
 &lt;/div&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-nvmf/throughput.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-nvmf/throughput.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 
 &lt;h3 id=&quot;sequential-throughput&quot;&gt;Sequential Throughput&lt;/h3&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
-Let us look at the sequential read and write throughput for buffered and direct streams and compare them to a buffered Crail stream on DRAM. All benchmarks are single thread/client performed against 8 storage nodes with 4 drives each, cf. configuration above. In this benchmark we use 32 outstanding operations for the NVMf storage tier buffered stream experiments by using a buffer size of 16MB and a slice size of 512KB, cf. &lt;a href=&quot;http://crail.incubator.apache.org/blog/2017/07/crail-memory.html&quot;&gt;part I&lt;/a&gt;. The buffered stream reaches line speed at a transfer size of around 1KB and shows only slightly slower performance when compared to the DRAM tier buffered stream. However we are only using 2 outstanding operations with the DRAM tier to achieve these results. Basically for sizes smaller than 1KB the buffered stream is limited by the copy speed to fill the application buffer. The direct stream reaches line speed at around 128KB with 128 outstanding operations. Here no copy operation is performed for transfer size greater than 512Byte (sector size). The command to run the Crail buffered stream benchmark:
+Let us look at the sequential read and write throughput for buffered and direct streams and compare them to a buffered Crail stream on DRAM. All benchmarks are single thread/client performed against 8 storage nodes with 4 drives each, cf. configuration above. In this benchmark we use 32 outstanding operations for the NVMf storage tier buffered stream experiments by using a buffer size of 16MB and a slice size of 512KB, cf. &lt;a href=&quot;{{ site.base }}/blog/2017/07/crail-memory.html&quot;&gt;part I&lt;/a&gt;. The buffered stream reaches line speed at a transfer size of around 1KB and shows only slightly slower performance when compared to the DRAM tier buffered stream. However we are only using 2 outstanding operations with the DRAM tier to achieve these results. Basically for sizes smaller than 1KB the buffered stream is limited by the copy speed to fill the application buffer. The direct stream reaches line speed at around 128KB with 128 outstanding operations. Here no copy operation is performed for transfer size greater than 512Byte (sector size). The command to run the Crail buffered stream benchmark:
 &lt;/p&gt;
 &lt;/div&gt;
 &lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div class=&quot;highlight&quot;&gt;&lt;pre class=&quot;highlight&quot;&gt;&lt;code&gt;./bin/crail iobench -t read -s &amp;lt;size&amp;gt; -k &amp;lt;iterations&amp;gt; -w 32 -f /tmp.dat
@@ -638,7 +726,7 @@ Let us look at the sequential read and write throughput for buffered and direct
 &lt;div class=&quot;highlighter-rouge&quot;&gt;&lt;div class=&quot;highlight&quot;&gt;&lt;pre class=&quot;highlight&quot;&gt;&lt;code&gt;./bin/crail iobench -t readAsync -s &amp;lt;size&amp;gt; -k &amp;lt;iterations&amp;gt; -b 128 -w 32 -f /tmp.dat
 &lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;&lt;/div&gt;
 
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-nvmf/throughput2.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-nvmf/throughput2.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 
 &lt;h3 id=&quot;random-read-latency&quot;&gt;Random Read Latency&lt;/h3&gt;
 
@@ -648,16 +736,16 @@ Random read latency is limited by the flash technology and we currently see arou
 &lt;/p&gt;
 &lt;/div&gt;
 
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-nvmf/latency2.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-nvmf/latency2.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 
 &lt;h3 id=&quot;tiering-dram---nvmf&quot;&gt;Tiering DRAM - NVMf&lt;/h3&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
-In this paragraph we show how Crail can leverage flash memory when there is not sufficient DRAM available in the cluster to hold all the data. As described in the &lt;a href=&quot;http://crail.incubator.apache.org/overview/&quot;&gt;overview&lt;/a&gt; section, if you have multiple storage tiers deployed in Crail, e.g. the DRAM tier and the NVMf tier, Crail by default first uses up all available resources of the faster tier. Basically a remote resource of a faster tier (e.g. remote DRAM) is preferred over a slower local resource (e.g., local flash), motivated by the fast network. This is what we call horizontal tiering.
+In this paragraph we show how Crail can leverage flash memory when there is not sufficient DRAM available in the cluster to hold all the data. As described in the &lt;a href=&quot;{{ site.base }}/overview/&quot;&gt;overview&lt;/a&gt; section, if you have multiple storage tiers deployed in Crail, e.g. the DRAM tier and the NVMf tier, Crail by default first uses up all available resources of the faster tier. Basically a remote resource of a faster tier (e.g. remote DRAM) is preferred over a slower local resource (e.g., local flash), motivated by the fast network. This is what we call horizontal tiering.
 &lt;/p&gt;
 &lt;/div&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-nvmf/crail_tiering.png&quot; width=&quot;500&quot; vspace=&quot;10&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-nvmf/crail_tiering.png&quot; width=&quot;500&quot; vspace=&quot;10&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
@@ -665,9 +753,24 @@ In the following 200G Terasort experiment we gradually limit the DRAM resources
 &lt;/p&gt;
 &lt;/div&gt;
 
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-nvmf/tiering.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-nvmf/tiering.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+
+&lt;p&gt;To summarize, in this blog we have shown that the NVMf storage backend for Crail – due to its efficient user-level implementation – offers latencies and throughput very close to the hardware speed. The Crail NVMf storage tier can be used conveniently in combination with the Crail DRAM tier to either save cost or to handle situations where the available DRAM is not sufficient to store the working set of a data processing workload.&lt;/p&gt;
 
-&lt;p&gt;To summarize, in this blog we have shown that the NVMf storage backend for Crail – due to its efficient user-level implementation – offers latencies and throughput very close to the hardware speed. The Crail NVMf storage tier can be used conveniently in combination with the Crail DRAM tier to either save cost or to handle situations where the available DRAM is not sufficient to store the working set of a data processing workload.&lt;/p&gt;</content><author><name>Jonas Pfefferle</name></author><category term="blog" /><summary type="html">This is part II of our series of posts discussing Crail's raw storage performance. This part is about Crail's NVMe storage tier, a low-latency flash storage backend for Crail completely based on user-level storage access.</summary></entry><entry><title type="html">Crail Storage Performance – Part I: DRAM</title><link href="http://crail.incubator.apache.org//blog/2017/08/crail-memory.html" rel="alternate" type="text/html" title="Crail Storage Performance -- Part I: DRAM" /><published>2017-08-18T00:00:00+02:00</published><updated>2017-08-18T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2017/08/crail-memory</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/08/crail-memory.html">&lt;div style=&quot;text-align: justify&quot;&gt; 
+</content>
+<author>
+<name>Jonas Pfefferle</name>
+</author>
+<category term="blog" />
+<summary>This is part II of our series of posts discussing Crail's raw storage performance. This part is about Crail's NVMe storage tier, a low-latency flash storage backend for Crail completely based on user-level storage access.</summary>
+</entry>
+<entry>
+<title>Crail Storage Performance – Part I: DRAM</title>
+<link href="http://crail.incubator.apache.org//blog/2017/08/crail-memory.html" rel="alternate" type="text/html" title="Crail Storage Performance -- Part I: DRAM" />
+<published>2017-08-18T00:00:00+02:00</published>
+<updated>2017-08-18T00:00:00+02:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/08/crail-memory</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/08/crail-memory.html">&lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
 It's summer and there is some time to blog about things. This blog post is the first in a series of three posts where we illustrate Crail's raw storage performance on our 100Gbps cluster. In part I we cover Crail's DRAM storage tier, part II will be about Crail's NVMe flash storage tier, and part III will be about Crail's metadata performance. 
 &lt;/p&gt;
@@ -734,7 +837,7 @@ One challenge with file read/write operations is to avoid blocking in case block
 &lt;/p&gt;
 &lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-memory/anatomy.png&quot; width=&quot;420&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-memory/anatomy.png&quot; width=&quot;420&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
@@ -770,8 +873,8 @@ The figure below illustrates the sequential write (top) and read (bottom) perfor
 &lt;/p&gt;
 &lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;/p&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-memory/write.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-memory/read.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-memory/write.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-memory/read.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 &lt;p&gt;&lt;br /&gt;&lt;br /&gt;&lt;/p&gt;
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
@@ -782,8 +885,8 @@ Note that both figures show single-client performance numbers. With Crail being
 &lt;/p&gt;
 &lt;/div&gt;
 
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-memory/crail-groupby.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-memory/spark-groupby.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-memory/crail-groupby.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-memory/spark-groupby.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 
 &lt;h3 id=&quot;random-read-latency&quot;&gt;Random Read Latency&lt;/h3&gt;
 
@@ -799,7 +902,7 @@ Typically, distributed storage systems are either built for sequential access to
 The figure below illustrates the latencies of get() operations for different key/value sizes and compares them to the latencies we obtained with RAMCloud for the same type of operations (measured using RAMClouds C and Java APIs). RAMCloud is a low-latency key/value store implemented using RDMA. RAMCloud actually provides durable storage by asynchronously replicating data onto backup devices. However, at any point in time all the data is held in DRAM and read requests will be served from DRAM directly. Up to our knowledge, RAMCloud is the fastest key/value store that is (a) available open source and (b) can be deployed in practice as a storage platform for applications. Other similar RDMA-based storage systems we looked at, like FaRM or HERD, are either not open source or they do not provide a clean separation between storage system, API and clients. 
 &lt;/p&gt;
 &lt;/div&gt;
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-memory/latency.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-memory/latency.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
@@ -813,11 +916,41 @@ The latency advantages of Crail are beneficial also at the application level. Th
 &lt;/p&gt;
 &lt;/div&gt;
 
-&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;http://crail.incubator.apache.org/img/blog/crail-memory/cdf-broadcast-128-read.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
+&lt;div style=&quot;text-align:center&quot;&gt;&lt;img src=&quot;{{ site.base }}/img/blog/crail-memory/cdf-broadcast-128-read.svg&quot; width=&quot;550&quot; /&gt;&lt;/div&gt;
 
 &lt;div style=&quot;text-align: justify&quot;&gt; 
 &lt;p&gt;
 To summarize, in this blog post we have shown that Crail's DRAM storage tier provides both throughput and latency close to the hardware limits. These performance benefits enable high-level data processing operations like shuffle or broadcast to be implemented faster and/or more efficient.
 &lt;/p&gt;
 
-&lt;/div&gt;</content><author><name>Patrick Stuedi</name></author><category term="blog" /><summary type="html">It's summer and there is some time to blog about things. This blog post is the first in a series of three posts where we illustrate Crail's raw storage performance on our 100Gbps cluster. In part I we cover Crail's DRAM storage tier, part II will be about Crail's NVMe flash storage tier, and part III will be about Crail's metadata performance. I recently read the Octopus file system Usenix'17 paper, where the authors show Crail performance numbers that do not match the performance we measure on our clusters. Like many other distributed systems, Crail also requires a careful system configuration and wrong or mismatching configuration settings can easily lead to poor performance. Therefore, in this blog we try to point out the key parameter settings that are necessary to obtain proper performance numbers with Crail.</summary></entry><entry><title type="html">Openpower</title><link href="http://crail.incubator.apache.org//blog/2017/08/openpower.html" rel="alternate" type="text/html" title="Openpower" /><published>2017-08-04T00:00:00+02:00</published><updated>2017-08-04T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2017/08/openpower</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/08/openpower.html">&lt;p&gt;Crail on OpenPower discussed by Peter Hofstee on &lt;a href=&quot;https://www.youtube.com/watch?v=f-pgMaEmqn4&amp;amp;feature=youtu.be&amp;amp;platform=hootsuite&quot;&gt;Youtube&lt;/a&gt;&lt;/p&gt;</content><author><name></name></author><category term="news" /><summary type="html">Crail on OpenPower discussed by Peter Hofstee on Youtube</summary></entry><entry><title type="html">Disni</title><link href="http://crail.incubator.apache.org//blog/2017/06/disni.html" rel="alternate" type="text/html" title="Disni" /><published>2017-06-17T00:00:00+02:00</published><updated>2017-06-17T00:00:00+02:00</updated><id>http://crail.incubator.apache.org//blog/2017/06/disni</id><content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/06/disni.html">&lt;p&gt;DiSNI, the RDMA and NVMe user-level stack used in Crail is now available on &lt;a href=&quot;https://search.maven.org/&quot;&gt;Maven Central&lt;/a&gt;&lt;/p&gt;</content><author><name></name></author><category term="news" /><summary type="html">DiSNI, the RDMA and NVMe user-level stack used in Crail is now available on Maven Central</summary></entry></feed>
\ No newline at end of file
+&lt;/div&gt;
+</content>
+<author>
+<name>Patrick Stuedi</name>
+</author>
+<category term="blog" />
+<summary> It's summer and there is some time to blog about things. This blog post is the first in a series of three posts where we illustrate Crail's raw storage performance on our 100Gbps cluster. In part I we cover Crail's DRAM storage tier, part II will be about Crail's NVMe flash storage tier, and part III will be about Crail's metadata performance. I recently read the Octopus file system Usenix'17 paper, where the authors show Crail performance numbers that do not match the performance we measure on our clusters. Like many other distributed systems, Crail also requires a careful system configuration and wrong or mismatching configuration settings can easily lead to poor performance. Therefore, in this blog we try to point out the key parameter settings that are necessary to obtain proper performance numbers with Crail. </summary>
+</entry>
+<entry>
+<title>Openpower</title>
+<link href="http://crail.incubator.apache.org//blog/2017/08/openpower.html" rel="alternate" type="text/html" title="Openpower" />
+<published>2017-08-04T00:00:00+02:00</published>
+<updated>2017-08-04T00:00:00+02:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/08/openpower</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/08/openpower.html">&lt;p&gt;Crail on OpenPower discussed by Peter Hofstee on &lt;a href=&quot;https://www.youtube.com/watch?v=f-pgMaEmqn4&amp;amp;feature=youtu.be&amp;amp;platform=hootsuite&quot;&gt;Youtube&lt;/a&gt;&lt;/p&gt;
+</content>
+<category term="news" />
+<summary>Crail on OpenPower discussed by Peter Hofstee on Youtube</summary>
+</entry>
+<entry>
+<title>Disni</title>
+<link href="http://crail.incubator.apache.org//blog/2017/06/disni.html" rel="alternate" type="text/html" title="Disni" />
+<published>2017-06-17T00:00:00+02:00</published>
+<updated>2017-06-17T00:00:00+02:00</updated>
+<id>http://crail.incubator.apache.org//blog/2017/06/disni</id>
+<content type="html" xml:base="http://crail.incubator.apache.org//blog/2017/06/disni.html">&lt;p&gt;DiSNI, the RDMA and NVMe user-level stack used in Crail is now available on &lt;a href=&quot;https://search.maven.org/&quot;&gt;Maven Central&lt;/a&gt;&lt;/p&gt;
+</content>
+<category term="news" />
+<summary>DiSNI, the RDMA and NVMe user-level stack used in Crail is now available on Maven Central</summary>
+</entry>
+</feed>
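
One detail reviewers may want to double-check in the regenerated feed: in the
new version the escaped post bodies carry literal "{{ site.base }}" Liquid tags
where the old version had absolute http://crail.incubator.apache.org URLs. A
quick way to confirm the feed is still well-formed XML and to count such
unrendered placeholders (a sketch, assuming xmllint is available):

    xmllint --noout content/feed.xml
    grep -c 'site.base' content/feed.xml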