Posted to commits@heron.apache.org by jo...@apache.org on 2021/02/01 14:55:10 UTC

[incubator-heron] branch master updated: Joshfischer/add download links (#3671)

This is an automated email from the ASF dual-hosted git repository.

joshfischer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-heron.git


The following commit(s) were added to refs/heads/master by this push:
     new 43fa2ff  Joshfischer/add download links (#3671)
43fa2ff is described below

commit 43fa2ffd41a2fd23c69834934f9b0f66d9775257
Author: Josh Fischer <jo...@joshfischer.io>
AuthorDate: Mon Feb 1 08:54:48 2021 -0600

    Joshfischer/add download links (#3671)
    
    * initial addition of heron download page.  Still work to do.
    
    * adding start to release page; adding updates for versioned docs
    
    * adding versioned docs
---
 website2/docs/compiling-overview.md                |   4 +-
 website2/website/heron-release.json                |   3 +
 website2/website/pages/en/download.js              | 157 ++++++
 website2/website/releases.json                     |   5 +-
 website2/website/scripts/replace.js                |   1 +
 website2/website/siteConfig.js                     |   1 +
 .../compiling-overview.md                          |   7 +-
 .../guides-troubeshooting-guide.md                 | 238 ++++++++
 .../version-0.20.3-incubating/schedulers-nomad.md  | 439 +++++++++++++++
 .../topology-development-streamlet-api.md          | 617 +++++++++++++++++++++
 .../version-0.20.3-incubating/uploaders-http.md    |  65 +++
 .../version-0.20.3-incubating-sidebars.json        |  99 ++++
 website2/website/versions.json                     |   1 +
 13 files changed, 1629 insertions(+), 8 deletions(-)

diff --git a/website2/docs/compiling-overview.md b/website2/docs/compiling-overview.md
index d891b4e..e93f986 100644
--- a/website2/docs/compiling-overview.md
+++ b/website2/docs/compiling-overview.md
@@ -20,7 +20,7 @@ sidebar_label: Compiling Overview
     under the License.
 -->
 
-Heron is currently available for [Mac OS X 10.14](compiling-osx),
+Heron is currently available for [macOS 11.0.1](compiling-osx),
 [Ubuntu 18.04](compiling-linux), and [Debian 10](compiling-docker#building-heron).
  This guide describes the basics of the
 Heron build system. For step-by-step build instructions for other platforms,
@@ -50,7 +50,7 @@ You must have the following installed to compile Heron:
 * [GNU Libtool](http://www.gnu.org/software/libtool/) >= 2.4.6
 * [gcc/g++](https://gcc.gnu.org/) >= 4.8.1 (Linux platforms)
 * [CMake](https://cmake.org/) >= 2.6.4
-* [Python](https://www.python.org/) >= 3.4
+* [Python](https://www.python.org/) >= 3.8
 * [Perl](https://www.perl.org/) >= 5.8.8
 * [Ant](https://ant.apache.org/) >= 1.10.0
 * [CppUnit](https://freedesktop.org/wiki/Software/cppunit/) >= 1.10.1
diff --git a/website2/website/heron-release.json b/website2/website/heron-release.json
new file mode 100644
index 0000000..666bcb3
--- /dev/null
+++ b/website2/website/heron-release.json
@@ -0,0 +1,3 @@
+[
+  "0.20.3-incubating"
+]
\ No newline at end of file
diff --git a/website2/website/pages/en/download.js b/website2/website/pages/en/download.js
new file mode 100644
index 0000000..df631b5
--- /dev/null
+++ b/website2/website/pages/en/download.js
@@ -0,0 +1,157 @@
+const React = require('react');
+
+const CompLibrary = require('../../core/CompLibrary');
+const MarkdownBlock = CompLibrary.MarkdownBlock; /* Used to read markdown */
+const Container = CompLibrary.Container;
+const GridBlock = CompLibrary.GridBlock;
+
+const CWD = process.cwd();
+
+const siteConfig = require(`${CWD}/siteConfig.js`);
+const releases = require(`${CWD}/releases.json`);
+const heronReleases = require(`${CWD}/heron-release.json`)
+
+function getLatestArchiveMirrorUrl(version, type) {
+    return `https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=incubator/heron/heron-${version}/heron-${version}-${type}.tar.gz`
+}
+
+function distUrl(version, type) {
+    return `https://www.apache.org/dist/incubator/heron/heron-${version}/heron-${version}-${type}.tar.gz`
+}
+
+function archiveUrl(version, type) {
+    if (version.includes('incubating')) {
+        return `https://archive.apache.org/dist/incubator/heron/heron-${version}/apache-heron-v-${version}-${type}.tar.gz`
+    } else {
+        return `https://archive.apache.org/dist/heron/heron-${version}/apache-heron-v-${version}-${type}.tar.gz`
+    }
+}
+
+
+
+class Download extends React.Component {
+  render() {
+    const latestVersion = releases[0];
+    const latestHeronVersion = heronReleases[0];
+    const latestArchiveMirrorUrl = getLatestArchiveMirrorUrl(latestVersion, 'bin');
+    const latestSrcArchiveMirrorUrl = getLatestArchiveMirrorUrl(latestVersion, 'src');
+    const latestArchiveUrl = distUrl(latestVersion, 'bin');
+    const latestSrcArchiveUrl = distUrl(latestVersion, 'src')
+
+    const releaseInfo = releases.map(version => {
+      return {
+        version: version,
+        binArchiveUrl: archiveUrl(version, 'bin'),
+        srcArchiveUrl: archiveUrl(version, 'source')
+      }
+    });
+
+
+    return (
+      <div className="pageContainer">
+        <Container className="mainContainer documentContainer postContainer">
+          <div className="post">
+            <header className="postHeader">
+              <h1>Apache Heron (Incubating) downloads</h1>
+              <hr />
+            </header>
+
+            <h2>Release notes</h2>
+            <div>
+              <p>
+                <a href={`${siteConfig.baseUrl}${this.props.language}/release-notes`}>Release notes</a> for all Heron versions
+              </p>
+            </div>
+
+            <h2 id="latest">Current version (Stable) {latestHeronVersion}</h2>
+            <table className="versions" style={{width:'100%'}}>
+              <thead>
+                <tr>
+                  <th>Release</th>
+                  <th>Link</th>
+                  <th>Crypto files</th>
+                </tr>
+              </thead>
+              <tbody>
+
+
+                <tr key={'source'}>
+                  <th>Source</th>
+                  <td>
+                    <a href={latestSrcArchiveMirrorUrl}>apache-heron-{latestVersion}-src.tar.gz</a>
+                  </td>
+                  <td>
+                    <a href={`${latestSrcArchiveUrl}.asc`}>asc</a>,&nbsp;
+                    <a href={`${latestSrcArchiveUrl}.sha512`}>sha512</a>
+                  </td>
+                </tr>
+                </tbody>
+              </table>
+
+
+            <h2>Release Integrity</h2>
+            <MarkdownBlock>
+              You must [verify](https://www.apache.org/info/verification.html) the integrity of the downloaded files.
+              We provide OpenPGP signatures for every release file. This signature should be matched against the
+              [KEYS](https://downloads.apache.org/incubator/heron/KEYS) file which contains the OpenPGP keys of
+              Heron's Release Managers. We also provide `SHA-512` checksums for every release file.
+              After you download the file, you should calculate a checksum for your download, and make sure it is
+              the same as ours.
+            </MarkdownBlock>
+
+            <h2>Getting started</h2>
+            <div>
+              <p>
+
+                Once you've downloaded a Heron release, instructions on getting up and running with a standalone cluster
+                that you can run on your laptop can be found in the{' '}
+                <a href={`${siteConfig.baseUrl}docs/getting-started-local-single-node`}>run Heron locally</a> tutorial.
+              </p>
+            </div>
+
+
+            <h2 id="archive">Older releases</h2>
+            <table className="versions">
+              <thead>
+                <tr>
+                  <th>Release</th>
+
+                  <th>Source</th>
+                  <th>Release notes</th>
+                </tr>
+              </thead>
+              <tbody>
+                {releaseInfo.map(
+                  info => {
+                        var sha = "sha512"
+                        if (info.version.includes('1.19.0-incubating') || info.version.includes('1.20.0-incubating')) {
+                            sha = "sha"
+                        }
+                        return info.version !== latestVersion && (
+                            <tr key={info.version}>
+                        <th>{info.version}</th>
+
+                          <td>
+                          <a href={info.srcArchiveUrl}>apache-heron-{info.version}-source.tar.gz</a>
+                              &nbsp;
+                          (<a href={`${info.srcArchiveUrl}.asc`}>asc</a>,&nbsp;
+                          <a href={`${info.srcArchiveUrl}.${sha}`}>{`${sha}`}</a>)
+                          </td>
+                          <td>
+                          <a href={`${siteConfig.baseUrl}${this.props.language}/release-notes#${info.version}`}>Release Notes</a>
+                          </td>
+                          </tr>
+                      )
+                    }
+                )}
+              </tbody>
+            </table>
+          </div>
+        </Container>
+      </div>
+    );
+  }
+}
+
+module.exports = Download;
\ No newline at end of file
diff --git a/website2/website/releases.json b/website2/website/releases.json
index bd3d303..99dd7df 100644
--- a/website2/website/releases.json
+++ b/website2/website/releases.json
@@ -1,5 +1,4 @@
 [
-    "0.20.0",
-    "0.19.0.16",
-    "0.19.0.12"   
+    "0.20.3-incubating",
+    "0.20.0-incubating"
 ]
\ No newline at end of file
diff --git a/website2/website/scripts/replace.js b/website2/website/scripts/replace.js
index 8fba053..bb5ac7a 100755
--- a/website2/website/scripts/replace.js
+++ b/website2/website/scripts/replace.js
@@ -38,6 +38,7 @@ const bazelVersions = {
     '0.20.0-incubating': '0.14.1',
     '0.20.1-incubating': '0.26.0',
     '0.20.2-incubating': '0.26.0',
+    '0.20.3-incubating': '3.7.0',
     'latest': '3.7.2',
 }
 
diff --git a/website2/website/siteConfig.js b/website2/website/siteConfig.js
index c5b9278..4559360 100644
--- a/website2/website/siteConfig.js
+++ b/website2/website/siteConfig.js
@@ -75,6 +75,7 @@ const siteConfig = {
     {href: '/api/java', label: "Javadocs"},
     {href: '/api/python', label: "Pydocs"},
     {doc: 'getting-started-local-single-node', label: 'Docs'},
+    {page: 'download', label: "Downloads"},
     {href: '#community', label: 'Community'},
     //{blog: true, label: 'Blog'},
     {href: '#apache', label: 'Apache'},
diff --git a/website2/docs/compiling-overview.md b/website2/website/versioned_docs/version-0.20.3-incubating/compiling-overview.md
similarity index 96%
copy from website2/docs/compiling-overview.md
copy to website2/website/versioned_docs/version-0.20.3-incubating/compiling-overview.md
index d891b4e..abd70c0 100644
--- a/website2/docs/compiling-overview.md
+++ b/website2/website/versioned_docs/version-0.20.3-incubating/compiling-overview.md
@@ -1,7 +1,8 @@
 ---
-id: compiling-overview
+id: version-0.20.3-incubating-compiling-overview
 title: Compiling Heron
 sidebar_label: Compiling Overview
+original_id: compiling-overview
 ---
 <!--
     Licensed to the Apache Software Foundation (ASF) under one
@@ -20,7 +21,7 @@ sidebar_label: Compiling Overview
     under the License.
 -->
 
-Heron is currently available for [Mac OS X 10.14](compiling-osx),
+Heron is currently available for [macOS 11.0.1](compiling-osx),
 [Ubuntu 18.04](compiling-linux), and [Debian 10](compiling-docker#building-heron).
  This guide describes the basics of the
 Heron build system. For step-by-step build instructions for other platforms,
@@ -50,7 +51,7 @@ You must have the following installed to compile Heron:
 * [GNU Libtool](http://www.gnu.org/software/libtool/) >= 2.4.6
 * [gcc/g++](https://gcc.gnu.org/) >= 4.8.1 (Linux platforms)
 * [CMake](https://cmake.org/) >= 2.6.4
-* [Python](https://www.python.org/) >= 3.4
+* [Python](https://www.python.org/) >= 3.8
 * [Perl](https://www.perl.org/) >= 5.8.8
 * [Ant](https://ant.apache.org/) >= 1.10.0
 * [CppUnit](https://freedesktop.org/wiki/Software/cppunit/) >= 1.10.1
diff --git a/website2/website/versioned_docs/version-0.20.3-incubating/guides-troubeshooting-guide.md b/website2/website/versioned_docs/version-0.20.3-incubating/guides-troubeshooting-guide.md
new file mode 100644
index 0000000..256b747
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.3-incubating/guides-troubeshooting-guide.md
@@ -0,0 +1,238 @@
+---
+id: version-0.20.3-incubating-guides-troubeshooting-guide
+title: Topology Troubleshooting Guide
+sidebar_label: Topology Troubleshooting Guide
+original_id: guides-troubeshooting-guide
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+### Overview
+
+This guide provides basic steps for troubleshooting a topology: starting
+points for investigating potential issues and identifying their root causes.
+
+This guide is organized into the following broad sections:
+
+* [Determine topology running status and health](#running)
+* [Identify topology problems](#problem)
+* [Frequently seen issues](#frequent)
+
+This guide is intended for topology developers. Issues related to Heron's configuration or
+its [internal architecture](heron-architecture), such as `schedulers`, are covered in the Configuration and Heron Developers sections respectively, and are not discussed here.
+
+<a name="running"></a>
+
+### Determine topology running status and health
+
+#### 1. Estimate your data rate
+
+It is important to estimate how much data a topology is expected to consume.
+A useful approach is to begin by estimating the data rate in terms of items per minute.
+The emit count (tuples per minute) of each spout should match the data rate of the
+corresponding data stream. If spouts do not consume and emit the data at the same rate
+as it is produced, the deficit accumulates as `spout lag`.
+
+Some spouts, like the `Kafka Spout`, have a lag metric that can be used
+directly to measure health. It is recommended to expose a similar lag metric
+in any custom spout, so that it is easy to check and to create monitoring
+alerts, as sketched below.
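+
+As an illustration, here is a minimal sketch of such a metric. It assumes Heron's
+Storm-style spout API and an `AssignableMetric` registered via
+`TopologyContext.registerMetric`; the class name and the `latestAvailableOffset`
+helper are hypothetical, and any gauge-style metric would work in its place.
+
+```java
+import java.util.Map;
+
+import org.apache.heron.api.metric.AssignableMetric;
+import org.apache.heron.api.spout.BaseRichSpout;
+import org.apache.heron.api.spout.SpoutOutputCollector;
+import org.apache.heron.api.topology.OutputFieldsDeclarer;
+import org.apache.heron.api.topology.TopologyContext;
+import org.apache.heron.api.tuple.Fields;
+import org.apache.heron.api.tuple.Values;
+
+public class OffsetTrackingSpout extends BaseRichSpout {
+  private SpoutOutputCollector collector;
+  private AssignableMetric lagMetric;
+  private long consumedOffset = 0L;
+
+  @Override
+  public void open(Map<String, Object> conf, TopologyContext context,
+                   SpoutOutputCollector collector) {
+    this.collector = collector;
+    // Export the current lag once per 60-second metrics window
+    lagMetric = new AssignableMetric(0L);
+    context.registerMetric("spout-lag", lagMetric, 60);
+  }
+
+  @Override
+  public void nextTuple() {
+    consumedOffset++;
+    collector.emit(new Values(consumedOffset));
+    lagMetric.setValue(latestAvailableOffset() - consumedOffset);
+  }
+
+  // Hypothetical helper: ask the source system for its newest offset
+  private long latestAvailableOffset() {
+    return consumedOffset; // placeholder; query your data source here
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declare(new Fields("offset"));
+  }
+}
+```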
+
+#### 2. Absent backpressure
+
+Backpressure initiated by an instance means that the instance cannot consume
+data at the rate at which it is receiving it. This results in all spouts getting
+clamped (they will not consume any more data) until the instance relieves the
+backpressure.
+
+Backpressure is measured in milliseconds per minute, the time an instance was under backpressure.  For example, a value of 60,000 means an instance was under backpressure for the whole minute (60 seconds).
+
+A healthy topology should not have backpressure. Backpressure usually results in
+spout lag building up, since the spouts get clamped, but it should be treated as
+a symptom, not a cause.
+
+Therefore, adjust and iterate on the topology until backpressure is absent.
+
+#### 3. Absent failures
+
+Failed tuples are generally considered bad for a topology, unless dropping tuples is
+acceptable by design (for instance, when the lowest possible latency matters more than
+completeness). If `acking` is disabled, or enabled but not handled properly in spouts,
+failures can result in data loss without adding any spout lag.
+
+
+<a name="problem"></a>
+### Identify topology problems
+
+#### 1. Look at instances under backpressure
+
+Backpressure metrics identify which instances have been under backpressure, so you can
+jump directly to the logs of those instances to see what is going wrong. Some of the
+known causes of backpressure are discussed in the [frequently seen issues](#frequent) section below.
+
+#### 2. Look at items pending to be acked
+
+Spouts export a metric that is a sampled value of the number of tuples
+still in flight in the topology. Sometimes the `max-spout-pending` config limits
+the consumption rate of the topology. Increasing that spout's parallelism
+generally solves the issue.
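+
+For reference, a sketch of both knobs on the Topology API `Config`; the values, the
+component name, and the hypothetical `OffsetTrackingSpout` are illustrative, and
+`setMaxSpoutPending` is the same setter used in the debugging example later in this guide.
+
+```java
+import org.apache.heron.api.Config;
+import org.apache.heron.api.topology.TopologyBuilder;
+
+Config conf = new Config();
+// Allow more tuples in flight before the spouts are throttled
+conf.setMaxSpoutPending(10000);
+
+// Or raise the spout's parallelism (the third argument)
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("my-spout", new OffsetTrackingSpout(), 4);
+```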
+
+<a name="frequent"></a>
+
+### Frequently seen issues
+
+#### 1. Topology does not launch
+
+*Symptom* - The Heron client fails to launch the topology.
+
+Note that the Heron client executes the topology's `main` method on the local
+system, which means spouts and bolts get instantiated locally, serialized, and then
+sent over to the schedulers as part of `topology.defn`. It is important to make sure
+that:
+
+1. All spouts and bolts are serializable.
+2. Non-serializable attributes are not instantiated in the constructor. Leave those to
+   a bolt's `prepare` or a spout's `open` method, which is called when the instances
+   start (see the sketch after this list).
+3. The `main` method does not try to access anything that your local machine
+   may not have access to.
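+
+To illustrate point 2, here is a minimal sketch assuming Heron's Storm-style bolt API;
+the class name and JDBC URL are hypothetical. The non-serializable connection is marked
+`transient` and created in `prepare`, not in the constructor.
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+
+import org.apache.heron.api.bolt.BaseRichBolt;
+import org.apache.heron.api.bolt.OutputCollector;
+import org.apache.heron.api.topology.OutputFieldsDeclarer;
+import org.apache.heron.api.topology.TopologyContext;
+import org.apache.heron.api.tuple.Tuple;
+
+public class DatabaseWriterBolt extends BaseRichBolt {
+  private final String jdbcUrl;        // serializable configuration only
+  private transient Connection conn;   // NOT serializable; created per instance
+
+  public DatabaseWriterBolt(String jdbcUrl) {
+    this.jdbcUrl = jdbcUrl;            // do not open the connection here
+  }
+
+  @Override
+  public void prepare(Map<String, Object> conf, TopologyContext context,
+                      OutputCollector collector) {
+    try {
+      conn = DriverManager.getConnection(jdbcUrl); // runs inside the container
+    } catch (SQLException e) {
+      throw new RuntimeException("Could not connect to " + jdbcUrl, e);
+    }
+  }
+
+  @Override
+  public void execute(Tuple tuple) {
+    // write the tuple to the database via conn ...
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    // terminal bolt: no output streams
+  }
+}
+```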
+
+#### 2. Topology does not start
+
+We assume here that the Heron client has successfully launched the topology.
+
+*Symptom* - Physical plan or logical plan does not show up on UI
+
+*Possible Cause* - One or more of the stream managers have not yet connected to the
+Tmanager.
+
+*What to do* -
+
+1. Check the Tmanager logs for the topology. The zeroth container is reserved for
+   the Tmanager. Go to that container and browse to
+
+        log-files/heron-tmanager-<topology-name><topology-id>.INFO
+
+    and see which stream managers have not yet connected. The `stmgr` ID
+    corresponds to the container number. For example, `stmgr-10` corresponds to
+    container 10, and so on.
+
+2. Visit that container and check the stream manager's logs, which can be found
+    in the `log-files` directory, just as for the Tmanager.
+
+#### 3. Instances are not starting up
+
+A topology will not start until all of its instances are running, so an instance that fails to start can keep the whole topology from starting.
+
+*Symptom* - The stream manager's logs never show that the
+instance connected to it.
+
+*Possible Cause* - Bad configs were passed when the instance process was
+launched.
+
+*What to do* -
+
+1. Visit the container and browse to `heron-executor.stdout` and
+   `heron-executor.stderr` files. All commands to instantiate the instances and
+   stream managers are redirected to these files.
+
+2. Check JVM configs for anything amiss.
+
+3. If `Xmx` is too low, increase `containerRAM` or `componentRAM` (see the sketch
+   below). Note that because Heron sets aside some RAM for its internal components,
+   such as the stream manager and metrics manager, having a large number of instances
+   and low `containerRAM` may starve these instances.
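+
+A sketch of both settings, assuming the Topology API helpers `setContainerRamRequested`
+and `setComponentRam`; the component name and sizes are illustrative.
+
+```java
+import org.apache.heron.api.Config;
+import org.apache.heron.common.basics.ByteAmount;
+
+Config conf = new Config();
+// Give every container more headroom...
+conf.setContainerRamRequested(ByteAmount.fromGigabytes(4));
+// ...or raise the RAM of one heavy component
+conf.setComponentRam("word-counter", ByteAmount.fromGigabytes(2));
+```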
+
+#### 4. Metrics for a component are missing/absent
+
+*Symptom* - The upstream component is emitting data, but this component is not
+executing any tuples, and no metrics are being reported.
+
+*Possible Cause* - The component might be stuck in a deadlock. Since an
+instance is a single JVM process and user code is called from the main thread,
+it is possible that execution is stuck in the `execute` method.
+
+*What to do* -
+
+1. Check the logs for one of the concerned instances. If a spout's `open` or
+   a bolt's `prepare` method has not completed, check the code logic to see
+   why.
+
+2. Check the code logic for deadlocks in a bolt's `execute` or a
+   spout's `nextTuple`, `ack` or `fail` methods. These methods should be
+   non-blocking, as in the sketch below.
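+
+A common pattern that keeps `nextTuple` non-blocking is to do the blocking I/O on a
+background thread that fills a queue, and only poll that queue in the spout. A minimal
+sketch, with a hypothetical class name and a placeholder for the blocking read:
+
+```java
+import java.util.Map;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.heron.api.spout.BaseRichSpout;
+import org.apache.heron.api.spout.SpoutOutputCollector;
+import org.apache.heron.api.topology.OutputFieldsDeclarer;
+import org.apache.heron.api.topology.TopologyContext;
+import org.apache.heron.api.tuple.Fields;
+import org.apache.heron.api.tuple.Values;
+
+public class NonBlockingSpout extends BaseRichSpout {
+  private SpoutOutputCollector collector;
+  private LinkedBlockingQueue<String> buffer;
+
+  @Override
+  public void open(Map<String, Object> conf, TopologyContext context,
+                   SpoutOutputCollector collector) {
+    this.collector = collector;
+    this.buffer = new LinkedBlockingQueue<>(10000);
+    // The blocking I/O lives on its own thread, never in nextTuple()
+    Thread fetcher = new Thread(() -> {
+      while (true) {
+        buffer.offer(readFromExternalSource());
+      }
+    });
+    fetcher.setDaemon(true);
+    fetcher.start();
+  }
+
+  @Override
+  public void nextTuple() {
+    String next = buffer.poll(); // returns null immediately if the queue is empty
+    if (next != null) {
+      collector.emit(new Values(next));
+    }
+  }
+
+  private String readFromExternalSource() {
+    return "payload"; // placeholder for a real blocking fetch
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declare(new Fields("payload"));
+  }
+}
+```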
+
+#### 5. There is backpressure from internal bolt
+
+A bolt is called internal if it does not talk to any external service. For example,
+a final bolt that writes its results to a database would not
+be called an internal bolt.
+
+This is invariably due to a lack of resources given to this bolt. Increasing its
+parallelism or RAM (based on the code logic) can solve the issue.
+
+#### 6. There is backpressure from external bolt
+
+By the same definition as above, an external bolt is one that accesses
+an external service. It might still be emitting data downstream.
+
+*Possible Cause 1* - The external service is slowing down this bolt.
+
+*What to do* -
+
+1. Check whether the external service is the bottleneck, and see if adding resources
+   to it solves the problem.
+
+2. Sometimes, changing the bolt's logic to trade caching against write rate can make a
+   difference.
+
+*Possible Cause 2* - A resource crunch for this bolt, just as with an internal bolt
+above.
+
+*What to do* -
+
+1. This should be handled in the same way as for an internal bolt - by increasing the
+   parallelism or RAM for the component, as sketched below.
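+
+A sketch of the parallelism remedy, reusing the hypothetical `DatabaseWriterBolt` from
+earlier in this guide; the component names, URL, and parallelism value are illustrative,
+and RAM can be raised with `setComponentRam` as sketched in issue 3 above.
+
+```java
+import org.apache.heron.api.topology.TopologyBuilder;
+
+TopologyBuilder builder = new TopologyBuilder();
+// Raise the external bolt's parallelism (third argument), e.g. from 2 to 8
+builder.setBolt("db-writer", new DatabaseWriterBolt("jdbc:postgresql://db:5432/app"), 8)
+       .shuffleGrouping("parser");
+```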
+
+#### 7. Debugging Java topologies
+The jar containing the code for building the topology, along with the spout and bolt 
+code, is deployed in the containers. A Heron Instance is started in each container, 
+with each Heron Instance responsible for running a bolt or a spout. One way to debug
+Java code is to write debug logs to the instance's log files.
+
+Logging is the preferred mode of debugging, as it makes it easier to find issues in both
+the short and the long term in the topology. If you want to perform step-by-step debugging
+of a JVM process, however, this can be achieved by enabling remote debugging for the Heron Instance.
+
+Follow these steps to enable remote debugging:
+
+1. Add the Java options to enable debugging on all the Heron Instances that will be started.
+   This can be achieved by adding the option `-agentlib:jdwp=transport=dt_socket,address=8888,server=y,suspend=n`. Here's an example:
+
+    ```java
+    conf.setDebug(true);
+    conf.setMaxSpoutPending(10);
+    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
+    conf.setComponentJvmOptions("word",
+           "-agentlib:jdwp=transport=dt_socket,address=8888,server=y,suspend=n");
+    conf.setComponentJvmOptions("exclaim1",
+           "-agentlib:jdwp=transport=dt_socket,address=8888,server=y,suspend=n");
+    ```
+
+2. Set up remote debugging in your IDE. For Eclipse, follow
+   [Set up Remote Debugging in Eclipse](http://help.eclipse.org/neon/index.jsp?topic=%2Forg.eclipse.jdt.doc.user%2Ftasks%2Ftask-remotejava_launch_config.htm).
+   For IntelliJ IDEA, follow the [remote debugging instructions](https://www.jetbrains.com/help/idea/2016.2/run-debug-configuration-remote.html).
+
+3. Once the topology is activated, start the debugger at `localhost:{port}` for a
+   local deployment, or at `{IP or hostname}:{port}` for a multi-container remote deployment. You will then be able to debug the code step by step.
diff --git a/website2/website/versioned_docs/version-0.20.3-incubating/schedulers-nomad.md b/website2/website/versioned_docs/version-0.20.3-incubating/schedulers-nomad.md
new file mode 100644
index 0000000..2d75cdb
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.3-incubating/schedulers-nomad.md
@@ -0,0 +1,439 @@
+---
+id: version-0.20.3-incubating-schedulers-nomad
+title: Nomad
+sidebar_label: Nomad
+original_id: schedulers-nomad
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron supports [Hashicorp](https://hashicorp.com)'s [Nomad](https://nomadproject.io) as a scheduler.
+
+> Update: Heron now supports running on Nomad via [raw exec driver](https://www.nomadproject.io/docs/drivers/raw_exec.html) and [docker driver](https://www.nomadproject.io/docs/drivers/docker.html)
+
+## Nomad setup
+
+Setting up a Nomad cluster is not covered here. See the [official Nomad docs](https://www.nomadproject.io/intro/getting-started/install.html) for instructions.
+
+**Running Heron on Nomad via the raw exec driver**
+
+Below are instructions for running Heron on Nomad via raw exec.  In this mode, Heron executors run as raw processes on the host machines.
+
+The advantage of this mode is that it is extremely lightweight and typically does not require sudo privileges to set up and run.  However, the setup procedure may be a little more complex than running via Docker, since there are more things to consider.  Also, resource allocation is taken into account but not enforced.
+
+## Requirements
+
+When setting up your Nomad cluster, the following are required:
+
+* The [Heron CLI tool](user-manuals-heron-cli) must be installed on each machine used to deploy Heron topologies
+* Python 3, Java 7 or 8, and [curl](https://curl.haxx.se/) must be installed on every machine in the cluster
+* A [ZooKeeper cluster](https://zookeeper.apache.org)
+
+## Configuring Heron settings
+
+Before running Heron via Nomad, you'll need to configure some settings. Once you've [installed Heron](getting-started-local-single-node), all of the configurations you'll need to modify will be in the `~/.heron/conf/nomad` directory.
+
+First, make sure that `heron.nomad.driver` is set to `"raw_exec"` in `~/.heron/conf/nomad/scheduler.yaml`, e.g.:
+
+```yaml
+heron.nomad.driver: "raw_exec"
+```
+
+You'll need to use a topology uploader to deploy topology packages to nodes in your cluster. You can use one of the following uploaders:
+
+* The HTTP uploader in conjunction with Heron's [API server](deployment-api-server). The Heron API server acts like a file server to which users can upload topology packages. The API server distributes the packages, along with the Heron core package, to the relevant machines. You can also use the API server to submit your Heron topology to Nomad (described [below](#deploying-with-the-api-server)) <!-- TODO: link to upcoming HTTP uploader documentation -->
+* [Amazon S3](uploaders-amazon-s3). Please note that the S3 uploader requires an AWS account.
+* [SCP](uploaders-scp). Please note that the SCP uploader requires SSH access to nodes in the cluster.
+
+You can modify the `heron.class.uploader` parameter in `~/.heron/conf/nomad/uploader.yaml` to choose an uploader.
+
+In addition, you must update the `heron.statemgr.connection.string` parameter in the `statemgr.yaml` file in `~/.heron/conf/nomad` to your ZooKeeper connection string. Here's an example:
+
+```yaml
+heron.statemgr.connection.string: 127.0.0.1:2181
+```
+
+Then, update the `heron.nomad.scheduler.uri` parameter in `scheduler.yaml` to the URL of the Nomad server to which you'll be submitting jobs. Here's an example:
+
+```yaml
+heron.nomad.scheduler.uri: http://127.0.0.1:4646
+```
+
+You may also want to configure where Heron will store files on your machine if you're running Nomad locally (in `scheduler.yaml`). Here's an example:
+
+```yaml
+heron.scheduler.local.working.directory: ${HOME}/.herondata/topologies/${CLUSTER}/${ROLE}/${TOPOLOGY_ID}
+```
+
+> Heron uses string interpolation to fill in the missing values for `CLUSTER`, `ROLE`, etc.
+
+## Distributing Heron core
+
+The Heron core package needs to be made available for every machine in the cluster to download. You'll need to provide a URI for the Heron core package. Here are the currently supported protocols:
+
+* `file://` (local FS)
+* `http://` (HTTP)
+
+You can do this in one of several ways:
+
+* Use the Heron API server to distribute `heron-core.tar.gz` (see [here](deployment-api-server) for more info)
+* Copy `heron-core.tar.gz` onto every node in the cluster
+* Mount a network drive containing `heron-core.tar.gz` on every machine in the cluster
+* Upload `heron-core.tar.gz` to an S3 bucket and expose an HTTP endpoint
+* Upload `heron-core.tar.gz` to be hosted on a file server and expose an HTTP endpoint
+
+> A copy of `heron-core.tar.gz` is located at `~/.heron/dist/heron-core.tar.gz` on the machine on which you installed the Heron CLI.
+
+You'll need to set the URL for `heron-core.tar.gz` in the `client.yaml` configuration file in `~/.heron/conf/nomad`. Here are some examples:
+
+```yaml
+# local filesystem
+heron.package.core.uri: file:///path/to/heron/heron-core.tar.gz
+
+# from a web server
+heron.package.core.uri: http://some.webserver.io/heron-core.tar.gz
+```
+
+## Submitting Heron topologies to the Nomad cluster
+
+You can submit Heron topologies to a Nomad cluster via the [Heron CLI tool](user-manuals-heron-cli):
+
+```bash
+$ heron submit nomad \
+  <topology package path> \
+  <topology classpath> \
+  <topology CLI args>
+```
+
+Here's an example:
+
+```bash
+$ heron submit nomad \
+  ~/.heron/examples/heron-streamlet-examples.jar \           # Package path
+  org.apache.heron.examples.api.WindowedWordCountTopology \ # Topology classpath
+  windowed-word-count                                        # Args passed to topology
+```
+
+## Deploying with the API server
+
+The advantage of running the [Heron API Server](deployment-api-server) is that it can act as a file server to help you distribute topology package files and submit jobs to Nomad, so that you don't need to modify the configuration files mentioned above.  By using Heron’s API Server, you can set configurations such as the URI of ZooKeeper and the Nomad server once and not need to configure each machine from which you want to submit Heron topologies.
+
+## Running the API server
+
+You can run the Heron API server on any machine that can be reached by machines in your Nomad cluster via HTTP. Here's a command you can use to run the API server:
+
+```bash
+$ ~/.heron/bin/heron-apiserver \
+  --cluster nomad \
+  --base-template nomad \
+  -D heron.statemgr.connection.string=<ZooKeeper URI> \
+  -D heron.nomad.scheduler.uri=<Nomad URI> \
+  -D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader \
+  --verbose
+```
+
+You can also run the API server in Nomad itself, but you will need to have a local copy of the Heron API server executable on every machine in the cluster. Here's an example Nomad job for the API server:
+
+```hcl
+job "apiserver" {
+  datacenters = ["dc1"]
+  type = "service"
+  group "apiserver" {
+    count = 1
+    task "apiserver" {
+      driver = "raw_exec"
+      config {
+        command = <heron_apiserver_executable>
+        args = [
+        "--cluster", "nomad",
+        "--base-template", "nomad",
+        "-D", "heron.statemgr.connection.string=<zookeeper_uri>",
+        "-D", "heron.nomad.scheduler.uri=<scheduler_uri>",
+        "-D", "heron.class.uploader=org.apache.heron.uploader.http.HttpUploader",
+        "--verbose"]
+      }
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+      }
+    }
+  }
+}
+```
+
+Make sure to replace the following:
+
+* `<heron_apiserver_executable>` --- The local path to where the [Heron API server](deployment-api-server) executable is located (usually `~/.heron/bin/heron-apiserver`)
+* `<zookeeper_uri>` --- The URI for your ZooKeeper cluster
+* `<scheduler_uri>` --- The URI for your Nomad server
+
+## Using the Heron API server to distribute Heron topology packages
+
+Heron users can upload their Heron topology packages to the Heron API server using the HTTP uploader by modifying the `uploader.yaml` file to include the following:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader:    org.apache.heron.uploader.http.HttpUploader
+heron.uploader.http.uri: http://localhost:9000/api/v1/file/upload
+```
+
+The [Heron CLI](user-manuals-heron-cli) will take care of the upload. When the topology is starting up, the topology package will be automatically downloaded from the API server.
+
+## Using the API server to distribute the Heron core package
+
+Heron users can use the Heron API server to distribute the Heron core package. When running the API server, just add this argument:
+
+```bash
+--heron-core-package-path <path to Heron core>
+```
+
+Here's an example:
+
+```bash
+$ ~/.heron/bin/heron-apiserver \
+  --cluster nomad \
+  --base-template nomad \
+  --download-hostname 127.0.0.1 \
+  --heron-core-package-path ~/.heron/dist/heron-core.tar.gz \
+  -D heron.statemgr.connection.string=127.0.0.1:2181 \
+  -D heron.nomad.scheduler.uri=127.0.0.1:4647 \
+  -D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader \
+  --verbose
+```
+
+Then change the `client.yaml` file in `~/.heron/conf/nomad` to the following:
+
+```yaml
+heron.package.use_core_uri: true
+heron.package.core.uri:     http://localhost:9000/api/v1/file/download/core
+```
+
+## Using the API server to submit Heron topologies
+
+Users can submit topologies using the [Heron CLI](user-manuals-heron-cli) by specifying a service URL to the API server. Here's the format of that command:
+
+```bash
+$ heron submit nomad \
+  --service-url=<Heron API server URL> \
+  <topology package path> \
+  <topology classpath> \
+  <topology args>
+```
+
+Here's an example:
+
+```bash
+$ heron submit nomad \
+  --service-url=http://localhost:9000 \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.api.WindowedWordCountTopology \
+  windowed-word-count
+```
+
+## Integration with Consul for metrics
+Each Heron executor in a Heron topology serves metrics on a port randomly assigned by Nomad.  Thus, Consul is needed for service discovery, so that users can determine which port a given Heron executor is serving its metrics on.
+Given a running Consul cluster, every Heron executor automatically registers itself as a service with Consul, along with the port on which it serves metrics.
+
+The service is registered with a name in the following format:
+
+```yaml
+metrics-heron-<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+Each Heron executor registered with Consul is tagged with:
+
+```yaml
+<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+To add additional tags, specify them in a comma-delimited list via
+
+```yaml
+heron.nomad.metrics.service.additional.tags
+```
+
+in `scheduler.yaml`. For example:
+
+```yaml
+heron.nomad.metrics.service.additional.tags: "prometheus,metrics,heron"
+```
+
+Users can then configure Prometheus to scrape metrics for each Heron executor based on these tags.
+
+
+**Running Heron on Nomad via the Docker driver**
+
+Below are instructions for running Heron on Nomad via Docker containers.  In this mode, Heron executors run as Docker containers on the host machines.
+
+## Requirements
+
+When setting up your Nomad cluster, the following are required:
+
+* The [Heron CLI tool](user-manuals-heron-cli) must be installed on each machine used to deploy Heron topologies
+* Python 2.7, Java 7 or 8, and [curl](https://curl.haxx.se/) must be installed on every machine in the cluster
+* A [ZooKeeper cluster](https://zookeeper.apache.org)
+* Docker installed and enabled on every machine
+* Each machine must also be able to pull the official Heron docker image from DockerHub or have the image preloaded.
+
+## Configuring Heron settings
+
+Before running Heron via Nomad, you'll need to configure some settings. Once you've [installed Heron](getting-started-local-single-node), all of the configurations you'll need to modify will be in the `~/.heron/conf/nomad` directory.
+
+First, make sure that `heron.nomad.driver` is set to `"docker"` in `~/.heron/conf/nomad/scheduler.yaml`, e.g.:
+
+```yaml
+heron.nomad.driver: "docker"
+```
+
+You can also adjust which Docker image to use for running Heron via the `heron.executor.docker.image` parameter in `~/.heron/conf/nomad/scheduler.yaml`, e.g.:
+
+```yaml
+heron.executor.docker.image: 'heron/heron:latest'
+```
+
+You'll need to use a topology uploader to deploy topology packages to nodes in your cluster. You can use one of the following uploaders:
+
+* The HTTP uploader in conjunction with Heron's [API server](deployment-api-server). The Heron API server acts like a file server to which users can upload topology packages. The API server distributes the packages, along with the Heron core package, to the relevant machines. You can also use the API server to submit your Heron topology to Nomad (described [below](#deploying-with-the-api-server)) <!-- TODO: link to upcoming HTTP uploader documentation -->
+* [Amazon S3](uploaders-amazon-s3). Please note that the S3 uploader requires an AWS account.
+* [SCP](uploaders-scp). Please note that the SCP uploader requires SSH access to nodes in the cluster.
+
+You can modify the `heron.class.uploader` parameter in `~/.heron/conf/nomad/uploader.yaml` to choose an uploader.
+
+In addition, you must update the `heron.statemgr.connection.string` parameter in the `statemgr.yaml` file in `~/.heron/conf/nomad` to your ZooKeeper connection string. Here's an example:
+
+```yaml
+heron.statemgr.connection.string: 127.0.0.1:2181
+```
+
+Then, update the `heron.nomad.scheduler.uri` parameter in `scheduler.yaml` to the URL of the Nomad server to which you'll be submitting jobs. Here's an example:
+
+```yaml
+heron.nomad.scheduler.uri: http://127.0.0.1:4646
+```
+
+## Submitting Heron topologies to the Nomad cluster
+
+You can submit Heron topologies to a Nomad cluster via the [Heron CLI tool](user-manuals-heron-cli):
+
+```bash
+$ heron submit nomad \
+  <topology package path> \
+  <topology classpath> \
+  <topology CLI args>
+```
+
+Here's an example:
+
+```bash
+$ heron submit nomad \
+  ~/.heron/examples/heron-streamlet-examples.jar \           # Package path
+  org.apache.heron.examples.api.WindowedWordCountTopology \ # Topology classpath
+  windowed-word-count                                        # Args passed to topology
+```
+
+## Deploying with the API server
+
+The advantage of running the [Heron API Server](deployment-api-server) is that it can act as a file server to help you distribute topology package files and submit jobs to Nomad, so that you don't need to modify the configuration files mentioned above.  By using Heron’s API Server, you can set configurations such as the URI of ZooKeeper and the Nomad server once and not need to configure each machine from which you want to submit Heron topologies.
+
+## Running the API server
+
+You can run the Heron API server on any machine that can be reached by machines in your Nomad cluster via HTTP. Here's a command you can use to run the API server:
+
+```bash
+$ ~/.heron/bin/heron-apiserver \
+  --cluster nomad \
+  --base-template nomad \
+  -D heron.statemgr.connection.string=<ZooKeeper URI> \
+  -D heron.nomad.scheduler.uri=<Nomad URI> \
+  -D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader \
+  --verbose
+```
+
+You can also run the API server in Nomad itself, but you will need to have a local copy of the Heron API server executable on every machine in the cluster. Here's an example Nomad job for the API server:
+
+```hcl
+job "apiserver" {
+  datacenters = ["dc1"]
+  type = "service"
+  group "apiserver" {
+    count = 1
+    task "apiserver" {
+      driver = "raw_exec"
+      config {
+        command = <heron_apiserver_executable>
+        args = [
+        "--cluster", "nomad",
+        "--base-template", "nomad",
+        "-D", "heron.statemgr.connection.string=<zookeeper_uri>",
+        "-D", "heron.nomad.scheduler.uri=<scheduler_uri>",
+        "-D", "heron.class.uploader=org.apache.heron.uploader.http.HttpUploader",
+        "--verbose"]
+      }
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+      }
+    }
+  }
+}
+```
+
+Make sure to replace the following:
+
+* `<heron_apiserver_executable>` --- The local path to where the [Heron API server](deployment-api-server) executable is located (usually `~/.heron/bin/heron-apiserver`)
+* `<zookeeper_uri>` --- The URI for your ZooKeeper cluster
+* `<scheduler_uri>` --- The URI for your Nomad server
+
+## Using the Heron API server to distribute Heron topology packages
+
+Heron users can upload their Heron topology packages to the Heron API server using the HTTP uploader by modifying the `uploader.yaml` file to include the following:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader:    org.apache.heron.uploader.http.HttpUploader
+heron.uploader.http.uri: http://localhost:9000/api/v1/file/upload
+```
+
+## Integration with Consul for metrics
+Each container in a Heron topology serves metrics on a port randomly assigned by Nomad.  Thus, Consul is needed for service discovery, so that users can determine which port a given container is serving its metrics on.
+Given a running Consul cluster, every Heron executor running in a Docker container automatically registers itself as a service with Consul, along with the port on which it serves metrics.
+
+The service is registered with a name in the following format:
+
+```yaml
+metrics-heron-<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+Each Heron executor registered with Consul is tagged with:
+
+```yaml
+<TOPOLOGY_NAME>-<CONTAINER_INDEX>
+```
+
+To add additional tags, specify them in a comma-delimited list via
+
+```yaml
+heron.nomad.metrics.service.additional.tags
+```
+
+in `scheduler.yaml`. For example:
+
+```yaml
+heron.nomad.metrics.service.additional.tags: "prometheus,metrics,heron"
+```
+
+Users can then configure Prometheus to scrape metrics for each container based on these tags.
diff --git a/website2/website/versioned_docs/version-0.20.3-incubating/topology-development-streamlet-api.md b/website2/website/versioned_docs/version-0.20.3-incubating/topology-development-streamlet-api.md
new file mode 100644
index 0000000..fcfd26c
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.3-incubating/topology-development-streamlet-api.md
@@ -0,0 +1,617 @@
+---
+id: version-0.20.3-incubating-topology-development-streamlet-api
+title: The Heron Streamlet API for Java
+sidebar_label: The Heron Streamlet API for Java
+original_id: topology-development-streamlet-api
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+ > **The Heron Streamlet API is in beta.** 
+ > The Heron Streamlet API is well tested and can be used to build and test topologies locally. The API is not yet fully stable, however, and breaking changes are likely in the coming weeks.
+
+
+Heron processing topologies can be written using an API called the **Heron Streamlet API**. The Heron Streamlet API is currently available for the following languages:
+
+* [Java](topology-development-streamlet-api)
+* [Scala](topology-development-streamlet-scala)
+
+> Although this document covers the new Heron Streamlet API, topologies created using the original [topology API](topology-development-topology-api-java) can still be used with Heron (which means that all of your older topologies will still run).
+
+For a more in-depth conceptual guide to the new API, see [The Heron Streamlet API](topology-development-streamlet-api). A high-level overview can also be found in the section immediately [below](#the-heron-streamlet-api-vs-the-topology-api).
+
+## The Heron Streamlet API vs. The Topology API
+
+When Heron was first released, all Heron topologies needed to be written using an API based on the [Storm Topology API](topology-development-topology-api-java). Although this API is quite powerful (and can still be used), the **Heron Streamlet API** enables you to create topologies without needing to implement spouts and bolts directly or to connect spouts and bolts together.
+
+Here are some crucial differences between the two APIs:
+
+Domain | Original Topology API | Heron Streamlet API
+:------|:----------------------|:--------------------
+Programming style | Procedural, processing component based | Functional
+Abstraction level | **Low level**. Developers must think in terms of "physical" spout and bolt implementation logic. | **High level**. Developers can write processing logic in an idiomatic fashion in the language of their choice, without needing to write and connect spouts and bolts.
+Processing model | [Spout](heron-topology-concepts#spouts) and [bolt](heron-topology-concepts#bolts) logic must be created explicitly, and connecting spouts and bolts is the responsibility of the developer | Spouts and bolts are created for you automatically on the basis of the processing graph that you build
+
+The two APIs also have a few things in common:
+
+* Topologies' [logical](heron-topology-concepts#logical-plan) and [physical](heron-topology-concepts#physical-plan) plans are automatically created by Heron
+* Topologies are [managed](user-manuals-heron-cli) in the same way using the `heron` CLI tool
+
+## Getting started
+
+In order to use the Heron Streamlet API for Java, you'll need to install the `heron-api` library.
+
+### Maven setup
+
+In order to use the `heron-api` library, add this to the `dependencies` block of your `pom.xml` configuration file:
+
+```xml
+<dependency>
+    <groupId>org.apache.heron</groupId>
+    <artifactId>heron-api</artifactId>
+    <version>{{< heronVersion >}}</version>
+</dependency>
+```
+
+#### Compiling a JAR with dependencies
+
+In order to run a Java topology created using the Heron Streamlet API in a Heron cluster, you'll need to package your topology as a "fat" JAR with dependencies included. You can use the [Maven Assembly Plugin](https://maven.apache.org/plugins/maven-assembly-plugin/usage.html) to generate JARs with dependencies. To install the plugin and add a Maven goal for a single JAR, add this to the `plugins` block in your `pom.xml`:
+
+```xml
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+        <archive>
+            <manifest>
+                <mainClass></mainClass>
+            </manifest>
+        </archive>
+    </configuration>
+    <executions>
+        <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+                <goal>single</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+```
+
+Once your `pom.xml` is properly set up, you can compile the JAR with dependencies using this command:
+
+```bash
+$ mvn assembly:assembly
+```
+
+By default, this will add a JAR in your project's `target` folder with the name `PROJECT-NAME-VERSION-jar-with-dependencies.jar`. Here's an example topology submission command using a compiled JAR:
+
+```bash
+$ mvn assembly:assembly
+$ heron submit local \
+  target/my-project-1.2.3-jar-with-dependencies.jar \
+  com.example.Main \
+  MyTopology arg1 arg2
+```
+
+### Java Streamlet API starter project
+
+If you'd like to get up and running quickly with the Heron Streamlet API for Java, you can view the example topologies [here](https://github.com/apache/incubator-heron/tree/{{ heron:version }}/examples/src/java/org/apache/heron/examples/streamlet).
+
+If you're running a [local Heron cluster](getting-started-local-single-node), you can submit the built example topology like this:
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.streamlet.WindowedWordCountTopology \
+  streamletWindowedWordCount
+```
+
+#### Selecting delivery semantics
+
+Heron enables you to apply one of three [delivery semantics](heron-delivery-semantics) to any Heron topology. For the example topology above, you can select the delivery semantics when you submit the topology with the topology's second argument. This command, for example, would apply [effectively-once](heron-delivery-semantics) to the example topology:
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.streamlet.WireRequestsTopology \
+  wireRequestsTopology
+```
+
+The other options are `at-most-once` and `at-least-once`. If you don't explicitly select the delivery semantics, at-least-once semantics will be applied.
+
+## Streamlet API topology configuration
+
+Every Streamlet API topology needs to be configured using a `Config` object. Here's an example default configuration:
+
+```java
+import org.apache.heron.streamlet.Config;
+import org.apache.heron.streamlet.Runner;
+
+Config topologyConfig = Config.defaultConfig();
+
+// Apply topology configuration using the topologyConfig object
+Runner topologyRunner = new Runner();
+topologyRunner.run("name-for-topology", topologyConfig, topologyBuilder);
+```
+
+The table below shows the configurable parameters for Heron topologies:
+
+Parameter | Default
+:---------|:-------
+[Delivery semantics](#delivery-semantics) | At most once
+Serializer | [Kryo](https://github.com/EsotericSoftware/kryo)
+Number of containers | 2
+Per-container CPU | 1.0
+Per-container RAM | 100 MB
+
+Here's an example non-default configuration:
+
+```java
+Config topologyConfig = Config.newBuilder()
+        .setNumContainers(5)
+        .setPerContainerRamInGigabytes(10)
+        .setPerContainerCpu(3.5f)
+        .setDeliverySemantics(Config.DeliverySemantics.EFFECTIVELY_ONCE)
+        .setSerializer(Config.Serializer.JAVA)
+        .setUserConfig("some-key", "some-value")
+        .build();
+```
+
+### Delivery semantics
+
+You can apply [delivery semantics](heron-delivery-semantics) to a Streamlet API topology like this:
+
+```java
+topologyConfig
+        .setDeliverySemantics(Config.DeliverySemantics.EFFECTIVELY_ONCE);
+```
+
+The other available options in the `DeliverySemantics` enum are `ATMOST_ONCE` and `ATLEAST_ONCE`.
+
+## Streamlets
+
+In the Heron Streamlet API for Java, processing graphs consist of streamlets. One or more supplier streamlets inject data into your graph to be processed by downstream operators.
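+
+As a minimal sketch, here is a complete processing graph with one supplier streamlet,
+one `map` operation, and a `log` sink; the class and topology names are arbitrary.
+
+```java
+import org.apache.heron.streamlet.Builder;
+import org.apache.heron.streamlet.Config;
+import org.apache.heron.streamlet.Runner;
+
+public final class HelloStreamlet {
+  public static void main(String[] args) {
+    Builder builder = Builder.newBuilder();
+
+    // Supplier streamlet: injects an endless stream of greetings
+    builder.newSource(() -> "hello")
+        .map(s -> s + ", world")
+        .log(); // terminal sink: write the results to stdout
+
+    new Runner().run("hello-streamlet", Config.defaultConfig(), builder);
+  }
+}
+```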
+
+## Operations
+
+Operation | Description | Example
+:---------|:------------|:-------
+[`map`](#map-operations) | Create a new streamlet by applying the supplied mapping function to each element in the original streamlet | Add 1 to each element in a streamlet of integers
+[`flatMap`](#flatmap-operations) | Like a map operation but with the important difference that each element of the streamlet is flattened | Flatten a sentence into individual words
+[`filter`](#filter-operations) | Create a new streamlet containing only the elements that satisfy the supplied filtering function | Remove all inappropriate words from a streamlet of strings
+[`union`](#union-operations) | Unifies two streamlets into one, without modifying the elements of the two streamlets | Unite two different `Streamlet<String>`s into a single streamlet
+[`clone`](#clone-operations) | Creates any number of identical copies of a streamlet | Create three separate streamlets from the same source
+[`transform`](#transform-operations) | Transform a streamlet using whichever logic you'd like (useful for transformations that don't neatly map onto the available operations) |
+[`join`](#join-operations) | Create a new streamlet by combining two separate key-value streamlets into one on the basis of each element's key. Supported Join Types: Inner (as default), Outer-Left, Outer-Right and Outer. | Combine key-value pairs listing current scores (e.g. `("h4x0r", 127)`) for each user into a single per-user stream
+[`keyBy`](#key-by-operations) | Returns a new key-value streamlet by applying the supplied extractors to each element in the original streamlet |
+[`reduceByKey`](#reduce-by-key-operations) | Produces a streamlet of key-value pairs, reduced on each key in accordance with a reduce function applied to all the accumulated values | Count the number of times a value has been encountered
+[`reduceByKeyAndWindow`](#reduce-by-key-and-window-operations) | Produces a streamlet of key-value pairs, reduced on each key within a time window, in accordance with a reduce function applied to all the accumulated values | Count the number of times a value has been encountered within a specified time window
+[`countByKey`](#count-by-key-operations) | A special reduce operation that counts the number of tuples on each key | Count the number of times a value has been encountered
+[`countByKeyAndWindow`](#count-by-key-and-window-operations) | A special reduce operation that counts the number of tuples on each key, within a time window | Count the number of times a value has been encountered within a specified time window
+[`split`](#split-operations) | Split a streamlet into multiple streamlets with different IDs |
+[`withStream`](#with-stream-operations) | Select the stream with a given ID from a streamlet that contains multiple streams |
+[`applyOperator`](#apply-operator-operations) | Returns a new streamlet by applying a user-defined operator to the original streamlet | Apply an existing bolt as an operator
+[`repartition`](#repartition-operations) | Create a new streamlet by applying a new parallelism level to the original streamlet | Increase the parallelism of a streamlet from 5 to 10
+[`toSink`](#sink-operations) | Sink operations terminate the processing graph by storing elements in a database, logging elements to stdout, etc. | Store processing graph results in an AWS Redshift table
+[`log`](#log-operations) | Logs the final results of a processing graph to stdout. This *must* be the last step in the graph. |
+[`consume`](#consume-operations) | Consume operations are like sink operations except they don't require implementing a full sink interface (consume operations are thus suited for simple operations like logging) | Log processing graph results using a custom formatting function
+
+### Map operations
+
+Map operations create a new streamlet by applying the supplied mapping function to each element in the original streamlet. Here's an example:
+
+```java
+builder.newSource(() -> 1)
+    .map(i -> i + 12);
+```
+
+In this example, a supplier streamlet emits an indefinite series of 1s. The `map` operation then adds 12 to each incoming element, producing a streamlet of 13s.
+
+### FlatMap operations
+
+FlatMap operations are like `map` operations but with the important difference that each element of the streamlet is "flattened" into a collection type. In this example, a supplier streamlet emits the same sentence over and over again; the `flatMap` operation transforms each sentence into a Java `List` of individual words:
+
+```java
+builder.newSource(() -> "I have nothing to declare but my genius")
+    .flatMap((sentence) -> Arrays.asList(sentence.split("\\s+")));
+```
+
+The effect of this operation is to transform the `Streamlet<String>` into a `Streamlet<List<String>>`.
+
+> One of the core differences between `map` and `flatMap` operations is that `flatMap` operations typically transform non-collection types into collection types.
+
+### Filter operations
+
+Filter operations retain elements in a streamlet, while potentially excluding some or all elements, on the basis of a provided filtering function. Here's an example:
+
+```java
+builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+        .filter((i) -> i < 7);
+```
+
+In this example, a source streamlet consisting of random integers between 1 and 10 is modified by a `filter` operation that removes all streamlet elements that are greater than 6.
+
+### Union operations
+
+Union operations combine two streamlets of the same type into a single streamlet without modifying the elements. Here's an example:
+
+```java
+Streamlet<String> flowers = builder.newSource(() -> "flower");
+Streamlet<String> butterflies = builder.newSource(() -> "butterfly");
+
+Streamlet<String> combinedSpringStreamlet = flowers
+        .union(butterflies);
+```
+
+Here, one streamlet is an endless series of "flower" elements while the other is an endless series of "butterfly" elements. The `union` operation combines them into a single streamlet of alternating "flower" and "butterfly" elements.
+
+### Clone operations
+
+Clone operations enable you to create any number of "copies" of a streamlet. Each of the "copy" streamlets contains all the elements of the original and can be manipulated just like the original streamlet. Here's an example:
+
+```java
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+Streamlet<Integer> integers = builder.newSource(() -> ThreadLocalRandom.current().nextInt(100));
+
+List<Streamlet<Integer>> copies = integers.clone(5);
+Streamlet<Integer> ints1 = copies.get(0);
+Streamlet<Integer> ints2 = copies.get(1);
+Streamlet<Integer> ints3 = copies.get(2);
+// and so on...
+```
+
+In this example, a streamlet of random integers between 0 and 99 is cloned into 5 identical streamlets.
+
+### Transform operations
+
+Transform operations are highly flexible operations that are most useful for:
+
+* operations involving state in [stateful topologies](heron-delivery-semantics#stateful-topologies)
+* operations that don't neatly fit into the other categories or into a lambda-based logic
+
+Transform operations require you to implement three different methods:
+
+* A `setup` method that enables you to pass a context object to the operation and to specify what happens prior to the `transform` step
+* A `transform` operation that performs the desired transformation
+* A `cleanup` method that allows you to specify what happens after the `transform` step
+
+The context object available to a transform operation provides access to:
+
+* the current state of the topology
+* the topology's configuration
+* the name of the stream
+* the stream partition
+* the current task ID
+
+Here's a Java example of a transform operation in a topology where a stateful record is kept of the number of items processed:
+
+```java
+import org.apache.heron.streamlet.Context;
+import org.apache.heron.streamlet.SerializableTransformer;
+
+import java.util.function.Consumer;
+
+public class CountNumberOfItems implements SerializableTransformer<String, String> {
+    private int numberOfItems;
+
+    public void setup(Context context) {
+        // The state map returns null for keys that haven't been set yet
+        Object currentCount = context.getState().get("number-of-items");
+        numberOfItems = (currentCount == null) ? 0 : (int) currentCount;
+        context.getState().put("number-of-items", numberOfItems + 1);
+    }
+
+    public void transform(String in, Consumer<String> consumer) {
+        String transformedString = in.toUpperCase(); // Apply some operation to the incoming value (uppercasing here is just an illustration)
+        consumer.accept(transformedString);
+    }
+
+    public void cleanup() {
+        System.out.println(
+                String.format("Successfully processed new state: %d", numberOfItems));
+    }
+}
+```
+
+This operation does a few things:
+
+* In the `setup` method, the [`Context`](/api/java/org/apache/heron/streamlet/Context.html) object is used to access the current state (which has the semantics of a Java `Map`). The current number of items processed is incremented by one and then saved as the new state.
+* In the `transform` method, the incoming string is transformed in some way and then "accepted" as the new value.
+* In the `cleanup` step, the current count of items processed is logged.
+
+Here's that operation within the context of a streamlet processing graph:
+
+```java
+builder.newSource(() -> "Some string over and over");
+        .transform(new CountNumberOfItems())
+        .log();
+```
+
+### Join operations
+
+Join operations unify two streamlets *on a key* (join operations thus require KV streamlets). Each `KeyValue` object in a streamlet has, by definition, a key. When a join operation is added to a processing graph, elements from the two streamlets that share a key are matched within a specified window and combined using a supplied join function. Here's an example:
+
+```java
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder();
+
+KVStreamlet<String, String> streamlet1 =
+        builder.newKVSource(() -> new KeyValue<>("heron-api", "topology-api"));
+
+builder.newSource(() -> new KeyValue<>("heron-api", "streamlet-api"))
+    .join(streamlet1, WindowConfig.TumblingCountWindow(10), KeyValue::create);
+```
+
+In this case, the resulting streamlet would consist of an indefinite stream with two `KeyValue` objects with the key `heron-api` but different values (`topology-api` and `streamlet-api`).
+
+> The effect of a join operation is to create a new streamlet *for each key*.
+
+### Key by operations
+
+Key by operations convert each item in the original streamlet into a key-value pair and return a new streamlet. Here is an example:
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .keyBy(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (get the length of each word)
+        word -> word.length()
+    )
+    // The result is logged
+    .log();
+```
+
+### Reduce by key operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key operations produce a new streamlet of key-value pairs, each comprising the extracted key and the reduced value. Here's an example:
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .reduceByKey(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (each word appears only once, hence the value is always 1)
+        word -> 1,
+        // Reduce operation (a running sum)
+        (x, y) -> x + y
+    )
+    // The result is logged
+    .log();
+```
+
+### Reduce by key and window operations
+
+You can apply [reduce](https://docs.oracle.com/javase/tutorial/collections/streams/reduction.html) operations to streamlets by specifying:
+
+* a key extractor that determines what counts as the key for the streamlet
+* a value extractor that determines which final value is chosen for each element of the streamlet
+* a [time window](../../../concepts/topologies#window-operations) across which the operation will take place
+* a reduce function that produces a single value for each key in the streamlet
+
+Reduce by key and window operations produce a new streamlet of key-value window objects, each comprising the extracted key and the calculated value, along with information about the window in which the operation took place. Here's an example:
+
+```java
+import java.util.Arrays;
+
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .reduceByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Value extractor (each word appears only once, hence the value is always 1)
+        word -> 1,
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50),
+        // Reduce operation (a running sum)
+        (x, y) -> x + y
+    )
+    // The result is logged
+    .log();
+```
+
+### Count by key operations
+
+Count by key operations extract keys from data in the original streamlet and count the number of times a key has been encountered. Here's an example:
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .countByKey(word -> word)
+    // The result is logged
+    .log();
+```
+
+### Count by key and window operations
+
+Count by key and window operations extract keys from data in the original streamlet and count the number of times a key has been encountered within each [time window](../../../concepts/topologies#window-operations). Here's an example:
+
+```java
+import java.util.Arrays;
+
+import org.apache.heron.streamlet.WindowConfig;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    .countByKeyAndWindow(
+        // Key extractor (in this case, each word acts as the key)
+        word -> word,
+        // Window configuration
+        WindowConfig.TumblingCountWindow(50)
+    )
+    // The result is logged
+    .log();
+```
+
+### Split operations
+
+Split operations split a streamlet into multiple streamlets with different ids by determining, for each item in the original streamlet, which stream(s) it belongs to. Here is an example:
+
+```java
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.heron.streamlet.SerializablePredicate;
+
+Map<String, SerializablePredicate<String>> splitter = new HashMap<>();
+splitter.put("long_word", s -> s.length() >= 4);
+splitter.put("short_word", s -> s.length() < 4);
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    // Splits the stream into streams of long and short words
+    .split(splitter)
+    // Choose the stream of the short words
+    .withStream("short_word")
+    // The result is logged
+    .log();
+```
+
+### With stream operations
+
+With stream operations select the stream with a given id from a streamlet that contains multiple streams. They are often used together with [split](#split-operations).
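+
+Here's a sketch that reuses the `splitter` map from the [split](#split-operations) example above, this time selecting the stream of long words:
+
+```java
+import java.util.Arrays;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    // Splits the stream into streams of long and short words
+    .split(splitter)
+    // Choose the stream of the long words this time
+    .withStream("long_word")
+    // The result is logged
+    .log();
+```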
+
+### Apply operator operations
+
+Apply operator operations apply a user defined operator (like a bolt) to each element of the original streamlet and return a new streamlet. Here is an example:
+
+```java
+import java.util.Arrays;
+
+// Assumes an existing bolt class `MyBolt` that consumes and emits single words
+class MyBoltOperator extends MyBolt implements IStreamletRichOperator<String, String> {
+}
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Mary had a little lamb")
+    // Convert each sentence into individual words
+    .flatMap(sentence -> Arrays.asList(sentence.toLowerCase().split("\\s+")))
+    // Apply user defined operation
+    .applyOperator(new MyBoltOperator())
+    // The result is logged
+    .log();
+```
+
+### Repartition operations
+
+When you assign a number of [partitions](#partitioning-and-parallelism) to a processing step, each step that comes after it inherits that number of partitions. Thus, if you assign 5 partitions to a `map` operation, then any `mapToKV`, `flatMap`, `filter`, etc. operations that come after it will also be assigned 5 partitions. But you can also change the number of partitions for a processing step (as well as the number of partitions for downstream operations) using `repartition`. Here's an example:
+
+```java
+import java.util.concurrent.ThreadLocalRandom;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+        .setNumPartitions(5)
+        .map(i -> i + 1)
+        .repartition(2)
+        .filter(i -> i > 7 || i < 2)
+        .log();
+```
+
+In this example, the supplier streamlet emits random integers between one and ten. That operation is assigned 5 partitions. After the `map` operation, the `repartition` function is used to assign 2 partitions to all downstream operations.
+
+### Sink operations
+
+In processing graphs like the ones you build using the Heron Streamlet API, **sinks** are essentially the terminal points in your graph, where your processing logic comes to an end. A processing graph can end with writing to a database, publishing to a topic in a pub-sub messaging system, and so on. With the Streamlet API, you can implement your own custom sinks. Here's an example:
+
+```java
+import org.apache.heron.streamlet.Context;
+import org.apache.heron.streamlet.Sink;
+
+public class FormattedLogSink<T> implements Sink<T> {
+    private String streamletName;
+
+    public void setup(Context context) {
+        streamletName = context.getStreamName();
+    }
+
+    public void put(T element) {
+        String message = String.format("Streamlet %s has produced an element with a value of: '%s'",
+                streamletName,
+                element.toString());
+        System.out.println(message);
+    }
+
+    public void cleanup() {}
+}
+```
+
+In this example, the sink fetches the name of the enclosing streamlet from the context passed in the `setup` method. The `put` method specifies how the sink handles each element that is received (in this case, a formatted message is logged to stdout). The `cleanup` method enables you to specify what happens after the element has been processed by the sink.
+
+Here is the `FormattedLogSink` at work in an example processing graph:
+
+```java
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Here is a string to be passed to the sink")
+        .toSink(new FormattedLogSink<>());
+```
+
+> [Log operations](#log-operations) rely on a log sink that is provided out of the box. You'll need to implement other sinks yourself.
+
+### Log operations
+
+Log operations are special cases of consume operations that log streamlet elements to stdout.
+
+> Streamlet elements are logged using their `toString` representations, at the `INFO` level.
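+
+Here's a minimal sketch of a processing graph terminated by a `log` step:
+
+```java
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> "Here is a string to be logged")
+        .log();
+```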
+
+### Consume operations
+
+Consume operations are like [sink operations](#sink-operations) except they don't require implementing a full sink interface. Consume operations are thus suited for simple operations like formatted logging. Here's an example:
+
+```java
+import java.util.concurrent.ThreadLocalRandom;
+
+Builder builder = Builder.newBuilder();
+
+builder.newSource(() -> ThreadLocalRandom.current().nextInt(1, 11))
+        .filter(i -> i % 2 == 0)
+        .consume(i -> {
+            String message = String.format("Even number found: %d", i);
+            System.out.println(message);
+        });
+```
diff --git a/website2/website/versioned_docs/version-0.20.3-incubating/uploaders-http.md b/website2/website/versioned_docs/version-0.20.3-incubating/uploaders-http.md
new file mode 100644
index 0000000..7b1c42d
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.3-incubating/uploaders-http.md
@@ -0,0 +1,65 @@
+---
+id: version-0.20.3-incubating-uploaders-http
+title: HTTP
+sidebar_label: HTTP
+original_id: uploaders-http
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+When a topology is submitted to Heron, the topology jars are uploaded to a stable location.
+The submitter provides this location to the scheduler, which passes it on to each
+container. Heron can use an HTTP uploader to upload the topology jar distribution to a stable
+HTTP location.
+
+### HTTP Uploader Configuration
+
+You can make Heron aware of the HTTP uploader by modifying the `uploader.yaml` config file specific
+to the Heron cluster. You’ll need to specify the following for each cluster:
+
+* `heron.class.uploader` — Indicates the uploader class to be loaded. You should set this
+to `org.apache.heron.uploader.http.HttpUploader`.
+
+* `heron.uploader.http.uri` — Provides the URI to which the topology jar should be
+uploaded.
+
+### Example HTTP Uploader Configuration
+
+Below is an example configuration (in `uploader.yaml`) for an HTTP uploader:
+
+```yaml
+# uploader class for transferring the topology jar/tar files to storage
+heron.class.uploader: org.apache.heron.uploader.http.HttpUploader
+
+heron.uploader.http.uri: http://localhost:9000/api/v1/file/upload
+```
+
+Heron's API server can also be used as a file server for the `HttpUploader` to upload topology
+packages/jars, as follows:
+
+```
+${HOME}/.heron/bin/heron-apiserver \
+  --cluster nomad \
+  --base-template nomad \
+  -D heron.statemgr.connection.string=<zookeeper_host:zookeeper_port> \
+  -D heron.nomad.scheduler.uri=<scheduler_uri> \
+  -D heron.class.uploader=org.apache.heron.uploader.http.HttpUploader \
+  --verbose
+```
+
+The HTTP server that the topology packages/jars are uploaded to also needs to return a URI upon
+upload so that Heron knows where to download the package later.
diff --git a/website2/website/versioned_sidebars/version-0.20.3-incubating-sidebars.json b/website2/website/versioned_sidebars/version-0.20.3-incubating-sidebars.json
new file mode 100644
index 0000000..34a1bf7
--- /dev/null
+++ b/website2/website/versioned_sidebars/version-0.20.3-incubating-sidebars.json
@@ -0,0 +1,99 @@
+{
+  "version-0.20.3-incubating-docs": {
+    "Getting Started": [
+      "version-0.20.3-incubating-getting-started-local-single-node",
+      "version-0.20.3-incubating-getting-started-migrate-storm-topologies",
+      "version-0.20.3-incubating-getting-started-troubleshooting-guide"
+    ],
+    "Deployment": [
+      "version-0.20.3-incubating-deployment-overview",
+      "version-0.20.3-incubating-deployment-configuration",
+      "version-0.20.3-incubating-deployment-api-server"
+    ],
+    "Topology Development APIs": [
+      "version-0.20.3-incubating-topology-development-streamlet-api",
+      "version-0.20.3-incubating-topology-development-eco-api",
+      "version-0.20.3-incubating-topology-development-topology-api-java",
+      "version-0.20.3-incubating-topology-development-topology-api-python",
+      "version-0.20.3-incubating-topology-development-streamlet-scala"
+    ],
+    "Client API Docs": [
+      "version-0.20.3-incubating-client-api-docs-overview"
+    ],
+    "Guides": [
+      "version-0.20.3-incubating-guides-effectively-once-java-topologies",
+      "version-0.20.3-incubating-guides-data-model",
+      "version-0.20.3-incubating-guides-tuple-serialization",
+      "version-0.20.3-incubating-guides-ui-guide",
+      "version-0.20.3-incubating-guides-topology-tuning",
+      "version-0.20.3-incubating-guides-packing-algorithms",
+      "version-0.20.3-incubating-guides-simulator-mode",
+      "version-0.20.3-incubating-guides-troubeshooting-guide"
+    ],
+    "Heron Concepts": [
+      "version-0.20.3-incubating-heron-design-goals",
+      "version-0.20.3-incubating-heron-topology-concepts",
+      "version-0.20.3-incubating-heron-streamlet-concepts",
+      "version-0.20.3-incubating-heron-architecture",
+      "version-0.20.3-incubating-heron-delivery-semantics"
+    ],
+    "State Managers": [
+      "version-0.20.3-incubating-state-managers-zookeeper",
+      "version-0.20.3-incubating-state-managers-local-fs"
+    ],
+    "Uploaders": [
+      "version-0.20.3-incubating-uploaders-local-fs",
+      "version-0.20.3-incubating-uploaders-hdfs",
+      "version-0.20.3-incubating-uploaders-http",
+      "version-0.20.3-incubating-uploaders-amazon-s3",
+      "version-0.20.3-incubating-uploaders-scp"
+    ],
+    "Schedulers": [
+      "version-0.20.3-incubating-schedulers-k8s-by-hand",
+      "version-0.20.3-incubating-schedulers-k8s-with-helm",
+      "version-0.20.3-incubating-schedulers-aurora-cluster",
+      "version-0.20.3-incubating-schedulers-aurora-local",
+      "version-0.20.3-incubating-schedulers-local",
+      "version-0.20.3-incubating-schedulers-nomad",
+      "version-0.20.3-incubating-schedulers-mesos-local-mac",
+      "version-0.20.3-incubating-schedulers-slurm",
+      "version-0.20.3-incubating-schedulers-yarn"
+    ],
+    "Cluster Configuration": [
+      "version-0.20.3-incubating-cluster-config-overview",
+      "version-0.20.3-incubating-cluster-config-system-level",
+      "version-0.20.3-incubating-cluster-config-instance",
+      "version-0.20.3-incubating-cluster-config-metrics",
+      "version-0.20.3-incubating-cluster-config-stream",
+      "version-0.20.3-incubating-cluster-config-tmanager"
+    ],
+    "Observability": [
+      "version-0.20.3-incubating-observability-prometheus",
+      "version-0.20.3-incubating-observability-graphite",
+      "version-0.20.3-incubating-observability-scribe"
+    ],
+    "User Manuals": [
+      "version-0.20.3-incubating-user-manuals-heron-cli",
+      "version-0.20.3-incubating-user-manuals-heron-explorer",
+      "version-0.20.3-incubating-user-manuals-tracker-rest",
+      "version-0.20.3-incubating-user-manuals-heron-tracker-runbook",
+      "version-0.20.3-incubating-user-manuals-heron-ui-runbook",
+      "version-0.20.3-incubating-user-manuals-heron-shell"
+    ],
+    "Compiling": [
+      "version-0.20.3-incubating-compiling-overview",
+      "version-0.20.3-incubating-compiling-linux",
+      "version-0.20.3-incubating-compiling-osx",
+      "version-0.20.3-incubating-compiling-docker",
+      "version-0.20.3-incubating-compiling-running-tests",
+      "version-0.20.3-incubating-compiling-code-organization"
+    ],
+    "Extending Heron": [
+      "version-0.20.3-incubating-extending-heron-scheduler",
+      "version-0.20.3-incubating-extending-heron-metric-sink"
+    ],
+    "Heron Resources": [
+      "version-0.20.3-incubating-heron-resources-resources"
+    ]
+  }
+}
diff --git a/website2/website/versions.json b/website2/website/versions.json
index 858a64d..707894d 100644
--- a/website2/website/versions.json
+++ b/website2/website/versions.json
@@ -1,4 +1,5 @@
 [
+  "0.20.3-incubating",
   "0.20.2-incubating",
   "0.20.1-incubating",
   "0.20.0-incubating"