You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@heron.apache.org by jo...@apache.org on 2022/10/22 18:04:55 UTC

[incubator-heron] 01/01: updating site for 0.20.5-incubating release

This is an automated email from the ASF dual-hosted git repository.

joshfischer pushed a commit to branch 0.20.5-incubating-site-changes
in repository https://gitbox.apache.org/repos/asf/incubator-heron.git

commit 826cd06f8c1b9e08fc5f6b40f54cdda9a41e3d50
Author: Josh Fischer <jo...@joshfischer.io>
AuthorDate: Sat Oct 22 13:04:41 2022 -0500

    updating site for 0.20.5-incubating release
---
 .../blog/2022-10-22-0.20.5-incubating-release.md   |  43 ++
 website2/website/heron-release.json                |   3 +-
 website2/website/pages/en/download.js              |  70 +-
 website2/website/release-notes.md                  | 116 +++-
 .../version-0.20.5-incubating/compiling-docker.md  | 251 +++++++
 .../version-0.20.5-incubating/compiling-linux.md   | 234 +++++++
 .../version-0.20.5-incubating/compiling-osx.md     | 102 +++
 .../compiling-overview.md                          | 135 ++++
 .../compiling-running-tests.md                     |  92 +++
 .../getting-started-docker.md                      |  50 ++
 .../getting-started-local-single-node.md           | 259 ++++++++
 .../schedulers-k8s-execution-environment.md        | 738 +++++++++++++++++++++
 .../topology-development-topology-api-java.md      | 441 ++++++++++++
 .../user-manuals-tracker-rest.md                   |  30 +
 .../version-0.20.5-incubating-sidebars.json        | 101 +++
 website2/website/versions.json                     |   1 +
 16 files changed, 2623 insertions(+), 43 deletions(-)

diff --git a/website2/website/blog/2022-10-22-0.20.5-incubating-release.md b/website2/website/blog/2022-10-22-0.20.5-incubating-release.md
new file mode 100644
index 00000000000..784acf9c54a
--- /dev/null
+++ b/website2/website/blog/2022-10-22-0.20.5-incubating-release.md
@@ -0,0 +1,43 @@
+---
+title: 0.20.5-incubating Release
+author: Josh Fischer
+authorURL: https://www.linkedin.com/in/joshfischer1108/
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+---
+The Heron community is pleased to announce the release of version 0.20.5-incubating! 
+We are excited as this is a moment in time when we start to work on graduating out of the incubator. 
+Heron has had several cloud-native-specific improvements, and the community has endured across several years. Thank you to all who have contributed to this and all prior releases.
+
+We have distributed a full set of convenience binaries along with enhanced support to run Heron more easily in Kubernetes.  
+
+Here are a few updates since the last release:
+
+* [Added support for adding Kubernetes annotations](https://github.com/apache/incubator-heron/pull/3699)
+* [Add support for dynamic kubernetes labels](https://github.com/apache/incubator-heron/pull/3701)
+* [Add ability to add Kubernetes Secrets and SecretKeyRefs](https://github.com/apache/incubator-heron/pull/3702)
+* [ConfigMap Pod Template Support](https://github.com/apache/incubator-heron/pull/3710)
+* [Restart a Topology on Kubernetes ](https://github.com/apache/incubator-heron/pull/3740)
+* [Add support for Persistent Volumes for stateful storage ](https://github.com/apache/incubator-heron/pull/3725)
+
+## Additional Information
+* To download Apache Heron 0.20.5-incubating, click [here](https://heron.apache.org/download)
+* To view the official Heron Docker Image repository click [here](https://hub.docker.com/repository/docker/apache/heron)
+* Maven artifacts can be found [here](https://search.maven.org/search?q=apache-heron)
+* For more information about Apache Heron 0.20.5-incubating, see [0.20.5-incubating release notes](https://heron.apache.org/release-notes/#0.20.5) and [The 0.20.5-incubating PR List](https://github.com/apache/incubator-heron/releases/tag/0.20.5-incubating-rc4)
diff --git a/website2/website/heron-release.json b/website2/website/heron-release.json
index f5fd3d0a6b4..b8f680dcbd7 100644
--- a/website2/website/heron-release.json
+++ b/website2/website/heron-release.json
@@ -1,5 +1,6 @@
 [
+  "0.20.5-incubating",
   "0.20.4-incubating",
   "0.20.3-incubating",
   "0.20.0-incubating"
-]
\ No newline at end of file
+]
diff --git a/website2/website/pages/en/download.js b/website2/website/pages/en/download.js
index 402555964ad..66450fb528e 100644
--- a/website2/website/pages/en/download.js
+++ b/website2/website/pages/en/download.js
@@ -19,7 +19,7 @@ function getTarUrl(version, type) {
 }
 
 function getInstallScriptCryptoUrl(version, osType) {
-   return `https://downloads.apache.org/incubator/heron/heron-${version}/heron-install-${version}-${osType}.sh`
+   return `https://downloads.apache.org/incubator/heron/heron-${version}/heron-install-${version}-${osType}.sh.tar.gz`
 }
 
 function distUrl(version, type) {
@@ -27,7 +27,7 @@ function distUrl(version, type) {
 }
 
 function getInstallScriptMirrorUrl(version, type) {
-    return `http://www.apache.org/dyn/closer.lua/incubator/heron/heron-${version}/heron-install-${version}-${type}.sh`
+    return `http://www.apache.org/dyn/closer.lua/incubator/heron/heron-${version}/heron-install-${version}-${type}.sh.tar.gz`
 }
 
 function archiveUrl(version, type) {
@@ -55,18 +55,16 @@ function getProperEndpoint(version, type) {
 class Download extends React.Component {
   render() {
     const latestHeronVersion = heronReleases[0];
-    const latestArchiveMirrorUrl = getLatestArchiveMirrorUrl(latestHeronVersion, 'bin');
     const latestSrcArchiveMirrorUrl = getLatestArchiveMirrorUrl(latestHeronVersion, 'src');
     const latestSrcUrl = getTarUrl(latestHeronVersion, "src");
-    const latestdebian11TarUrl =  getTarUrl(latestHeronVersion, "debian11");
-    const latestArchiveUrl = distUrl(latestHeronVersion, 'bin');
-    const latestSrcArchiveUrl = distUrl(latestHeronVersion, 'src')
-    const rocky8InstallUrl = getInstallScriptMirrorUrl(latestHeronVersion, "rocky8")
-    const rocky8InstallCryptoUrl = getInstallScriptCryptoUrl(latestHeronVersion, "rocky8")
+    const centOS7InstallUrl = getInstallScriptMirrorUrl(latestHeronVersion, "centos7")
+    const centOS7InstallCryptoUrl = getInstallScriptCryptoUrl(latestHeronVersion, "centos7")
+    const darwinInstallUrl = getInstallScriptMirrorUrl(latestHeronVersion, "darwin")
+    const darwinInstallCryptoUrl = getInstallScriptCryptoUrl(latestHeronVersion, "darwin")
     const debian11InstallUrl = getInstallScriptMirrorUrl(latestHeronVersion, "debian11")
     const debian11InstallCryptoUrl = getInstallScriptCryptoUrl(latestHeronVersion, "debian11")
-    const ubuntu2004InstallUrl = getInstallScriptMirrorUrl(latestHeronVersion, "ubuntu20.04")
-    const ubuntu2004InstallCryptoUrl = getInstallScriptCryptoUrl(latestHeronVersion, "ubuntu20.04")
+    const ubuntu2204InstallUrl = getInstallScriptMirrorUrl(latestHeronVersion, "ubuntu22.04")
+    const ubuntu2204InstallCryptoUrl = getInstallScriptCryptoUrl(latestHeronVersion, "ubuntu22.04")
 
 
 
@@ -117,26 +115,12 @@ class Download extends React.Component {
                     <a href={`${latestSrcUrl}.sha512`}>sha512</a>
                   </td>
                 </tr>
-                <tr key={'binary'}>
-                  <th>debian11 Binary</th>
-                  <td>
-                    <a href={latestSrcArchiveMirrorUrl}>heron-{latestHeronVersion}-debian11.tar.gz</a>
-                  </td>
-                  <td>
-                    <a href={`${latestdebian11TarUrl}.asc`}>asc</a>,&nbsp;
-                    <a href={`${latestdebian11TarUrl}.sha512`}>sha512</a>
-                  </td>
-                </tr>
+
                 </tbody>
               </table>
 
               <h2 id="latest">Heron Install Scripts</h2>
-              <h3 style={{color:"red"}}> READ BEFORE DOWNLOADING </h3>
-              <p>
-                To download the Heron self-extracting install scripts: click a link below for the Operating System of your choice that will show the closest mirror for you to download from.
-                 Once you are on the page with the closest mirror right click on the link and select “save as” to download the install script.
-                  If you do not right click the link will only bring you to view the script in the browser and will not start a download.
-              </p>
+
               <table className="versions" style={{width:'100%'}}>
                 <thead>
                   <tr>
@@ -148,34 +132,44 @@ class Download extends React.Component {
                 <tbody>
 
 
-                  <tr key={'rocky-install'}>
-                    <th>rocky8</th>
+                  <tr key={'centos7-install'}>
+                    <th>CentOS 7</th>
+                    <td>
+                      <a href={`${centOS7InstallUrl}`}> heron-install-{latestHeronVersion}-centos7.sh.tar.gz</a>
+                    </td>
+                    <td>
+                      <a href={`${centOS7InstallCryptoUrl}.asc`}>asc</a>,&nbsp;
+                      <a href={`${centOS7InstallCryptoUrl}.sha512`}>sha512</a>
+                    </td>
+                  </tr>
+                  <tr key={'darwin-install'}>
+                    <th>Darwin</th>
                     <td>
-                      <a href={`${rocky8InstallUrl}`}> heron-install-0.20.4-incubating-rocky8.sh</a>
+                      <a href={`${darwinInstallUrl}`}> heron-install-{latestHeronVersion}-darwin.sh.tar.gz</a>
                     </td>
                     <td>
-                      <a href={`${rocky8InstallCryptoUrl}.asc`}>asc</a>,&nbsp;
-                      <a href={`${rocky8InstallCryptoUrl}.sha512`}>sha512</a>
+                      <a href={`${darwinInstallCryptoUrl}.asc`}>asc</a>,&nbsp;
+                      <a href={`${darwinInstallCryptoUrl}.sha512`}>sha512</a>
                     </td>
                   </tr>
                   <tr key={'debian11-install'}>
                     <th>debian11</th>
                     <td>
-                      <a href={`${debian11InstallUrl}`}> heron-install-0.20.4-incubating-debian11.sh</a>
+                      <a href={`${debian11InstallUrl}`}> heron-install-{latestHeronVersion}-debian11.sh</a>
                     </td>
                     <td>
                       <a href={`${debian11InstallCryptoUrl}.asc`}>asc</a>,&nbsp;
                       <a href={`${debian11InstallCryptoUrl}.sha512`}>sha512</a>
                     </td>
                   </tr>
-                   <tr key={'ubuntu20.04-install'}>
-                    <th>Ubuntu20.04</th>
+                   <tr key={'ubuntu22.04-install'}>
+                    <th>Ubuntu22.04</th>
                     <td>
-                     <a href={`${ubuntu2004InstallUrl}`}> heron-install-0.20.4-incubating-ubuntu20.04.sh</a>
+                     <a href={`${ubuntu2204InstallUrl}`}> heron-install-{latestHeronVersion}-ubuntu22.04.sh</a>
                     </td>
                     <td>
-                      <a href={`${ubuntu2004InstallCryptoUrl}.asc`}>asc</a>,&nbsp;
-                      <a href={`${ubuntu2004InstallCryptoUrl}.sha512`}>sha512</a>
+                      <a href={`${ubuntu2204InstallCryptoUrl}.asc`}>asc</a>,&nbsp;
+                      <a href={`${ubuntu2204InstallCryptoUrl}.sha512`}>sha512</a>
                     </td>
                   </tr>
                   </tbody>
@@ -213,4 +207,4 @@ class Download extends React.Component {
   }
 }
 
-module.exports = Download;
\ No newline at end of file
+module.exports = Download;
diff --git a/website2/website/release-notes.md b/website2/website/release-notes.md
index 54251c98de6..f1386c53fbc 100644
--- a/website2/website/release-notes.md
+++ b/website2/website/release-notes.md
@@ -1,6 +1,118 @@
 
 ## Releases
 
+### 0.20.5-incubating &mdash; 2022-10-22 <a id="0.20.5"></a>
+
+## What's Changed
+* links by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3694
+* Update Bookkeeper to 4.13.0 and Zookeeper to 3.6.3 by @nicknezis in https://github.com/apache/incubator-heron/pull/3692
+* 0.20.4 site changes by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3695
+* Fix extra.links of heron-tracker by @thinker0 in https://github.com/apache/incubator-heron/pull/3696
+* Adding heron s3 region into config by @Bouryu in https://github.com/apache/incubator-heron/pull/3697
+* Added support for adding Kubernetes annotations to the topology pod and service by @nicknezis in https://github.com/apache/incubator-heron/pull/3699
+* Add support for dynamic kubernetes labels on pod and service by @nicknezis in https://github.com/apache/incubator-heron/pull/3701
+* Add ability to add Kubernetes Secrets and SecretKeyRefs by @nicknezis in https://github.com/apache/incubator-heron/pull/3702
+* Updated Kryo to 5.2.0 by @nicknezis in https://github.com/apache/incubator-heron/pull/3705
+* Adding missing logic to Kubernetes Scheduler to properly set the Remote Debug ports by @nicknezis in https://github.com/apache/incubator-heron/pull/3704
+* Bazel upgraded to 4.1.0 by @nicknezis in https://github.com/apache/incubator-heron/pull/3703
+* Update asf yaml by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3706
+* Support external Helm version being set when building Helm packages by @nicknezis in https://github.com/apache/incubator-heron/pull/3708
+* [HERON-3711] Setup Instructions for Intellij IDEA using the Bazel plugin. by @surahman in https://github.com/apache/incubator-heron/pull/3712
+* pointing older releases to be retrieved from archive.a.o by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3714
+* mirror link change by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3716
+* Fix heron-downloader cannot load the jar package correctly by @zhangshaoning1 in https://github.com/apache/incubator-heron/pull/3709
+* update heron-shell download handler logging by @huijunwu in https://github.com/apache/incubator-heron/pull/3718
+* [HERON-3707] ConfigMap Pod Template Support by @surahman in https://github.com/apache/incubator-heron/pull/3710
+* [Heron 3707]  ConfigMap Pod Template Support Documentation by @surahman in https://github.com/apache/incubator-heron/pull/3717
+* Fix glog/gflags by @thinker0 in https://github.com/apache/incubator-heron/pull/3728
+* Update pip-2020-resolver of PexBuilder by @thinker0 in https://github.com/apache/incubator-heron/pull/3727
+* Helm: Adding option to switch the apiserver service between NodePort and ClusterIP by @windhamwong in https://github.com/apache/incubator-heron/pull/3721
+* fix get_heron_dir by @thinker0 in https://github.com/apache/incubator-heron/pull/3731
+* Fix get_heron_tracker_dir by @thinker0 in https://github.com/apache/incubator-heron/pull/3734
+* Updated deprecated policy/v1beta1 to policy/v1 PodDisruptionBudget API by @nicknezis in https://github.com/apache/incubator-heron/pull/3737
+* Bump Netty to 4.1.70 by @nicknezis in https://github.com/apache/incubator-heron/pull/3729
+* [Heron-3733] CI Pipeline Failures Involving the <stmgr_unittest> by @surahman in https://github.com/apache/incubator-heron/pull/3735
+* update filename in integration_test http_server by @huijunwu in https://github.com/apache/incubator-heron/pull/3739
+* Restart a Topology on Kubernetes by @surahman in https://github.com/apache/incubator-heron/pull/3740
+* [Heron-3723] Add support for Persistent Volumes for stateful storage by @surahman in https://github.com/apache/incubator-heron/pull/3725
+* [Heron-3724] Separate the Manager and Executors. by @surahman in https://github.com/apache/incubator-heron/pull/3741
+* Kazoo version bump for Python 3.8 by @windhamwong in https://github.com/apache/incubator-heron/pull/3743
+* Fix dependency by @thinker0 in https://github.com/apache/incubator-heron/pull/3746
+* adding new committer by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3748
+* Updated to fix missing anticrlf dependency by @nicknezis in https://github.com/apache/incubator-heron/pull/3749
+* [Heron-3723] Add support for Empty Dir, Host Path, and NFS via CLI by @surahman in https://github.com/apache/incubator-heron/pull/3747
+* Kubernetes V1Controller Cleanup by @surahman in https://github.com/apache/incubator-heron/pull/3752
+* Added Docker to Vagrant VM init script by @nicknezis in https://github.com/apache/incubator-heron/pull/3756
+* Updated External JVM Rules plugin by @nicknezis in https://github.com/apache/incubator-heron/pull/3753
+* Updated Kubernetes client library to 14.0.0 by @nicknezis in https://github.com/apache/incubator-heron/pull/3754
+* Updated Netty to 4.1.72 by @nicknezis in https://github.com/apache/incubator-heron/pull/3755
+* Updated Dhalion to 0.2.6 by @nicknezis in https://github.com/apache/incubator-heron/pull/3757
+* Helm chart cleanup by @nicknezis in https://github.com/apache/incubator-heron/pull/3758
+* HeronPy 0.20.5 release prep cleanup by @nicknezis in https://github.com/apache/incubator-heron/pull/3759
+* libunwind 1.5.0 Upgrade by @surahman in https://github.com/apache/incubator-heron/pull/3760
+* Ubuntu 22.04 Support by @surahman in https://github.com/apache/incubator-heron/pull/3761
+* Upgrade to Bazel 4.2.2 by @nicknezis in https://github.com/apache/incubator-heron/pull/3764
+* Updated Google Test which now supports Bazel by @nicknezis in https://github.com/apache/incubator-heron/pull/3765
+* Fixed stmgr unittest segfault issue on macOS by @nicknezis in https://github.com/apache/incubator-heron/pull/3767
+* Upgrade WORKSPACE python packages for python3.8 support by @Code0x58 in https://github.com/apache/incubator-heron/pull/3646
+* Fixing an incorrect use of KubeVersion in the Helm Chart.yml by @nicknezis in https://github.com/apache/incubator-heron/pull/3775
+* Update to fix Heron UI Jinja2 issue by @nicknezis in https://github.com/apache/incubator-heron/pull/3777
+* Added missing packing plan handler by @nicknezis in https://github.com/apache/incubator-heron/pull/3783
+* Removing old Tornado asynchronous annotations by @nicknezis in https://github.com/apache/incubator-heron/pull/3781
+* Add legacy CentOS7 by @thinker0 in https://github.com/apache/incubator-heron/pull/3772
+* Change centos to RockyLinux-8 by @thinker0 in https://github.com/apache/incubator-heron/pull/3773
+* Fix conflict Rocky8 by @thinker0 in https://github.com/apache/incubator-heron/pull/3787
+* Fix git url by @thinker0 in https://github.com/apache/incubator-heron/pull/3794
+* Remove unnecessary Bookkeeper format init container from Helm chart by @nicknezis in https://github.com/apache/incubator-heron/pull/3795
+* Change log4j to slf4j of Logging by @thinker0 in https://github.com/apache/incubator-heron/pull/3791
+* Remove fixed young generation heap space of instance by @thinker0 in https://github.com/apache/incubator-heron/pull/3789
+* Support Runtime jdk17 by @thinker0 in https://github.com/apache/incubator-heron/pull/3792
+* Update build-docker.sh - Removing debug flag by @windhamwong in https://github.com/apache/incubator-heron/pull/3799
+* Upgrade protobuf to 3.16.1 CVE-2021-22569 by @thinker0 in https://github.com/apache/incubator-heron/pull/3797
+* Adding Bazel Platforms support by @nicknezis in https://github.com/apache/incubator-heron/pull/3779
+* Removing prevously missed refs to removed files by @nicknezis in https://github.com/apache/incubator-heron/pull/3805
+* Updated to fix MacOS stylecheck RuntimeError by @nicknezis in https://github.com/apache/incubator-heron/pull/3803
+* Update cppcheck to 2.7 by @nicknezis in https://github.com/apache/incubator-heron/pull/3804
+* Updated rules_pkg build dependency to 0.6.0 by @nicknezis in https://github.com/apache/incubator-heron/pull/3806
+* Add logback.xml by @thinker0 in https://github.com/apache/incubator-heron/pull/3807
+* Add SLF4JBridgeHandler by @thinker0 in https://github.com/apache/incubator-heron/pull/3809
+* Bump minimist from 1.2.5 to 1.2.6 in /website2/website by @dependabot in https://github.com/apache/incubator-heron/pull/3811
+* Replace Helm chart's generic Bookkeeper platform property with specific properties by @wromansky in https://github.com/apache/incubator-heron/pull/3798
+* Bump lodash from 4.17.15 to 4.17.21 in /website2/website by @dependabot in https://github.com/apache/incubator-heron/pull/3812
+* Fix for missing physical plan in UI by @nicknezis in https://github.com/apache/incubator-heron/pull/3786
+* Bump url-parse from 1.4.7 to 1.5.10 in /website2/website by @dependabot in https://github.com/apache/incubator-heron/pull/3813
+* Change ByteSize to ByteSizeLong for Protobuf by @thinker0 in https://github.com/apache/incubator-heron/pull/3816
+* Bump prismjs from 1.19.0 to 1.27.0 in /website2/website by @dependabot in https://github.com/apache/incubator-heron/pull/3815
+* Support legacy api of HealthManger by @thinker0 in https://github.com/apache/incubator-heron/pull/3818
+* Fix HealthManager by @thinker0 in https://github.com/apache/incubator-heron/pull/3819
+* Bump async from 2.6.3 to 2.6.4 in /website2/website by @dependabot in https://github.com/apache/incubator-heron/pull/3822
+* Updated Netty to 4.1.76.Final by @nicknezis in https://github.com/apache/incubator-heron/pull/3823
+* Upgrade Kryo to 5.3.0 by @nicknezis in https://github.com/apache/incubator-heron/pull/3824
+* Joshfischer/3774/pom dependencies by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3778
+* Added License file to published Jars by @nicknezis in https://github.com/apache/incubator-heron/pull/3827
+* Update Debian Docker images from 10 to 11 by @nicknezis in https://github.com/apache/incubator-heron/pull/3828
+* [3821] Remove deprecated Host Path, NFS, and EBS support for Kubernetes by @surahman in https://github.com/apache/incubator-heron/pull/3825
+* Update rules-scala of Scala by @thinker0 in https://github.com/apache/incubator-heron/pull/3832
+* update year in NOTICE, adding ASF headers to missed files by @joshfischer1108 in https://github.com/apache/incubator-heron/pull/3834
+* Added to show the number of instances in the topology list UI. by @thinker0 in https://github.com/apache/incubator-heron/pull/3831
+* Rearrange values.yaml to better style by @windhamwong in https://github.com/apache/incubator-heron/pull/3835
+* Reduce the DEBUG log by @thinker0 in https://github.com/apache/incubator-heron/pull/3836
+* Removed random long in filename which caused leaking in upload storage by @nicknezis in https://github.com/apache/incubator-heron/pull/3838
+* Upgraded Bookkeeper to 4.14.5 by @nicknezis in https://github.com/apache/incubator-heron/pull/3837
+* Upgrade to Zookeeper 3.8.0 by @nicknezis in https://github.com/apache/incubator-heron/pull/3840
+* Added missing epel-release repo in Rocky8 Dist image Dockerfile by @nicknezis in https://github.com/apache/incubator-heron/pull/3842
+* Fix Update of topology by @thinker0 in https://github.com/apache/incubator-heron/pull/3830
+* Remove stray space characters between \ and \n by @jmtd in https://github.com/apache/incubator-heron/pull/3843
+* Updates to fix issues with the Heron API dependencies by @nicknezis in https://github.com/apache/incubator-heron/pull/3844
+
+## New Contributors
+* @Bouryu made their first contribution in https://github.com/apache/incubator-heron/pull/3697
+* @surahman made their first contribution in https://github.com/apache/incubator-heron/pull/3712
+* @zhangshaoning1 made their first contribution in https://github.com/apache/incubator-heron/pull/3709
+* @wromansky made their first contribution in https://github.com/apache/incubator-heron/pull/3798
+* @jmtd made their first contribution in https://github.com/apache/incubator-heron/pull/3843
+
+
 ### 0.20.4-incubating &mdash; 2019-08-27 <a id="0.20.4"></a>
 
 - 83a742f (tag: 0.20.4-incubating-rc1, upstream/master) Adding disclaimers to release artifacts [#3689](https://github.com/apache/incubator-heron/pull/3689)
@@ -33,10 +145,6 @@
 
 
 
-### 0.21.1-incubating &mdash; 2019-7-20 <a id="0.20.1"></a>
-
-- Example commit of 0.21.1
-
 ### 0.20.0-incubating &mdash; 2018-11-21 <a id="0.20.0"></a>
 
 This is the first release of Heron as an Apache Incubating Project
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/compiling-docker.md b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-docker.md
new file mode 100644
index 00000000000..5ade258b1f4
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-docker.md
@@ -0,0 +1,251 @@
+---
+id: version-0.20.5-incubating-compiling-docker
+title: Compiling With Docker
+sidebar_label: Compiling With Docker
+original_id: compiling-docker
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+For developing Heron, you will need to compile it for the environment that you
+want to use it in. If you'd like to use Docker to create that build environment,
+Heron provides a convenient script to make that process easier.
+
+Currently debian11 and Ubuntu 20.04 are actively being supported.  There is also limited support for Ubuntu 18.04, and CentOS 8. If you
+need another platform there are instructions for adding new ones
+[below](#contributing-new-environments).
+
+### Requirements
+
+* [Docker](https://docs.docker.com)
+
+### Running Docker in a Virtual Machine
+
+If you are running Docker in a virtual machine (VM), it is recommended that you
+adjust your settings to help speed up the build. To do this, open
+[VirtualBox](https://www.virtualbox.org/wiki/Downloads) and go to the container
+in which Docker is running (usually "default" or whatever name you used to
+create the VM), click on the VM, and then click on **Settings**.
+
+**Note**: You will need to stop the VM before modifying these settings.
+
+![VirtualBox Processors](assets/virtual-box-processors.png)
+![VirtualBox Memory](assets/virtual-box-memory.png)
+
+## Building Heron
+
+Heron provides a `build-artifacts.sh` script for Docker located in the
+`docker` folder. To run that script:
+
+```bash
+$ cd /path/to/heron/repo
+$ docker/build-artifacts.sh
+```
+
+Running the script by itself will display usage information:
+
+```
+Script to build heron docker image for different platforms
+  Input - directory containing the artifacts from the directory <artifact-directory>
+  Output - docker image tar file saved in the directory <artifact-directory> 
+  
+Usage: ./docker/scripts/build-docker.sh <platform> <version_string> <artifact-directory> [-s|--squash]
+  
+Argument options:
+  <platform>: darwin, debian11, ubuntu20.04, rocky8
+  <version_string>: Version of Heron build, e.g. v0.17.5.1-rc
+  <artifact-directory>: Location of compiled Heron artifact
+  [-s|--squash]: Enables using Docker experimental feature --squash
+  
+Example:
+  ./build-docker.sh ubuntu20.04 0.12.0 ~/ubuntu
+
+NOTE: If running on OSX, the output directory will need to
+      be under /Users so VirtualBox has access to it.
+```
+
+The following arguments are required:
+
+* `platform` --- Currently we are focused on supporting the `debian11` and `ubuntu20.04` platforms.  
+We also support building Heron locally on OSX.  You can specify this as listing `darwin` as the platform.
+ All options are:
+   - `darwin`
+   - `rocky8`
+   - `debian11`
+   - `ubuntu18.04`
+   - `ubuntu20.04`
+    
+   
+  You can add other platforms using the [instructions
+  below](#contributing-new-environments).
+* `version-string` --- The Heron release for which you'd like to build
+  artifacts.
+* `output-directory` --- The directory in which you'd like the release to be
+  built.
+
+Here's an example usage:
+
+```bash
+$ docker/scripts/build-artifacts.sh debian11 0.22.1-incubating ~/heron-release
+```
+
+This will build a Docker container specific to debian11, create a source
+tarball of the Heron repository, run a full release build of Heron, and then
+copy the artifacts into the `~/heron-release` directory.
+
+Optionally, you can also include a tarball of the Heron source if you have one.
+By default, the script will create a tarball of the current source in the Heron
+repo and use that to build the artifacts.
+
+**Note**: If you are running on Mac OS X, Docker must be run inside a VM.
+Therefore, you must make sure that both the source tarball and destination
+directory are somewhere under your home directory. For example, you cannot
+output the Heron artifacts to `/tmp` because `/tmp` refers to the directory
+inside the VM, not on the host machine. Your home directory, however, is
+automatically linked in to the VM and can be accessed normally.
+
+After the build has completed, you can go to your output directory and see all
+of the generated artifacts:
+
+```bash
+$ ls ~/heron-release
+heron-0.22.1-incubating-debian11.tar
+heron-0.22.1-incubating-debian11.tar.gz
+heron-core-0.22.1-incubating-debian11.tar.gz
+heron-install-0.22.1-incubating-debian11.sh
+heron-layer-0.22.1-incubating-debian11.tar
+heron-tools-0.22.1-incubating-debian11.tar.gz
+```
+
+## Set Up A Docker Based Development Environment
+
+In case you want to have a development environment instead of making a full build,
+Heron provides two helper scripts for you. It could be convenient if you don't want
+to set up all the libraries and tools on your machine directly.
+
+The following commands are to create a new docker image with a development environment
+and start the container based on it:
+```bash
+$ cd /path/to/heron/repo
+$ docker/scripts/dev-env-create.sh heron-dev
+```
+
+After the commands, a new docker container is started with all the libraries and tools
+installed. The operating system is Ubuntu 18.04 by default. Now you can build Heron
+like:
+```bash
+\# bazel build scripts/packages:binpkgs
+\# bazel build scripts/packages:tarpkgs
+```
+
+The current folder is mapped to the '/heron' directory in the container and any changes
+you make on the host machine will be reflected in the container. Note that when you exit
+the container and re-run the script, a new container will be started with a fresh new
+environment.
+
+When a development environment container is running, you can use the following script
+to start a new terminal in the container.
+```bash
+$ cd /path/to/heron/repo
+$ docker/scripts/dev-env-run.sh heron-dev
+```
+
+## Contributing New Environments
+
+You'll notice that there are multiple
+[Dockerfiles](https://docs.docker.com/engine/reference/builder/) in the `docker`
+directory of Heron's source code, one for each of the currently supported
+platforms.
+
+To add support for a new platform, add a new `Dockerfile` to that directory and
+append the name of the platform to the name of the file. If you'd like to add
+support for Debian 11, for example, add a file named `Dockerfile.debian11`. Once
+you've done that, follow the instructions in the [Docker
+documentation](https://docs.docker.com/engine/articles/dockerfile_best-practices/).
+
+You should make sure that your `Dockerfile` specifies *at least* all of the
+following:
+
+### Step 1 --- The OS being used in a [`FROM`](https://docs.docker.com/engine/reference/builder/#from) statement.
+
+Here's an example:
+
+```dockerfile
+FROM rockylinux:8.5
+ ```
+
+### Step 2 --- A `TARGET_PLATFORM` environment variable using the [`ENV`](https://docs.docker.com/engine/reference/builder/#env) instruction.
+
+Here's an example:
+
+```dockerfile
+ENV TARGET_PLATFORM rocky
+```
+
+### Step 3 --- A general dependency installation script using a [`RUN`](https://docs.docker.com/engine/reference/builder/#run) instruction.
+
+Here's an example:
+
+```dockerfile
+RUN apt-get update && apt-get -y install \
+         automake \
+         build-essential \
+         cmake \
+         curl \
+         libssl-dev \
+         git \
+         libtool \
+         libunwind8 \
+         libunwind-setjmp0-dev \
+         python \
+         python2.7-dev \
+         python-software-properties \
+         software-properties-common \
+         python-setuptools \
+         unzip \
+         wget
+```
+
+### Step 4 --- An installation script for Java 11 and a `JAVA_HOME` environment variable
+
+Here's an example:
+
+```dockerfile
+RUN \
+     echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \
+     add-apt-repository -y ppa:webupd8team/java && \
+     apt-get update && \
+     apt-get install -y openjdk-11-jdk-headless && \
+     rm -rf /var/lib/apt/lists/*
+
+ENV JAVA_HOME /usr/lib/jvm/java-11-openjdk-amd64
+```
+
+#### Step 5 - An installation script for [Bazel](http://bazel.io/) version {{% bazelVersion %}} or above.
+Here's an example:
+
+```dockerfile
+RUN wget -O /tmp/bazel.sh https://github.com/bazelbuild/bazel/releases/download/0.26.0/bazel-0.26.0-installer-linux-x86_64.sh \
+         && chmod +x /tmp/bazel.sh \
+         && /tmp/bazel.sh
+```
+
+### Step 6 --- Add the `compile.sh` script (from the `docker` folder) that compiles Heron
+
+```dockerfile
+ADD compile.sh /compile.sh
+```
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/compiling-linux.md b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-linux.md
new file mode 100644
index 00000000000..7ed1ea41838
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-linux.md
@@ -0,0 +1,234 @@
+---
+id: version-0.20.5-incubating-compiling-linux
+title: Compiling on Linux
+sidebar_label: Compiling on Linux
+original_id: compiling-linux
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron can currently be built on the following Linux platforms:
+
+* [Ubuntu 20.04](#building-on-ubuntu-2004)
+* [Rocky 8](#building-on-rocky-8)
+
+## Building on Ubuntu 20.04
+
+To build Heron on a fresh Ubuntu 20.04 installation:
+
+### Step 1 --- Update Ubuntu
+
+```bash
+$ sudo apt-get update -y
+$ sudo apt-get upgrade -y
+```
+
+### Step 2 --- Install required libraries
+
+```bash
+$ sudo apt-get install git build-essential automake cmake libtool-bin zip ant \
+  libunwind-setjmp0-dev zlib1g-dev unzip pkg-config python3-setuptools -y
+```
+
+### Step 3 --- Set the following environment variables
+
+```bash
+export CC=/usr/bin/gcc
+export CXX=/usr/bin/g++
+```
+
+### Step 4 --- Install JDK 11 and set JAVA_HOME
+
+```bash
+$ sudo apt-get update -y
+$ sudo apt-get install openjdk-11-jdk-headless -y
+$ export JAVA_HOME="/usr/lib/jvm/java-11-openjdk-amd64"
+```
+
+### Step 5 --- Install Bazel {{% bazelVersion %}}
+
+```bash
+wget -O /tmp/bazel.sh https://github.com/bazelbuild/bazel/releases/download/0.26.0/bazel-0.26.0-installer-linux-x86_64.sh
+chmod +x /tmp/bazel.sh
+/tmp/bazel.sh --user
+```
+
+Make sure to download the appropriate version of Bazel (currently {{%
+bazelVersion %}}).
+
+### Step 6 --- Install python development tools
+```bash
+$ sudo apt-get install  python3-dev python3-pip
+```
+
+### Step 7 --- Make sure the Bazel executable is in your `PATH`
+
+```bash
+$ export PATH="$PATH:$HOME/bin"
+```
+
+### Step 8 --- Fetch the latest version of Heron's source code
+
+```bash
+$ git clone https://github.com/apache/incubator-heron.git && cd incubator-heron
+```
+
+### Step 9 --- Configure Heron for building with Bazel
+
+```bash
+$ ./bazel_configure.py
+```
+
+### Step 10 --- Build the project
+
+```bash
+$ bazel build heron/...
+```
+
+### Step 11 --- Build the packages
+
+```bash
+$ bazel build scripts/packages:binpkgs
+$ bazel build scripts/packages:tarpkgs
+```
+
+This will install Heron packages in the `bazel-bin/scripts/packages/` directory.
+
+## Manually Installing Libraries
+
+If you encounter errors with [libunwind](http://www.nongnu.org/libunwind), [libtool](https://www.gnu.org/software/libtool), or
+[gperftools](https://github.com/gperftools/gperftools/releases), we recommend
+installing them manually.
+
+### Compiling and installing libtool
+
+```bash
+$ wget http://ftpmirror.gnu.org/libtool/libtool-2.4.6.tar.gz
+$ tar -xvf libtool-2.4.6.tar.gz
+$ cd libtool-2.4.6
+$ ./configure
+$ make
+$ sudo make install
+```
+
+### Compiling and installing libunwind
+
+```bash
+$ wget http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz
+$ tar -xvf libunwind-1.1.tar.gz
+$ cd libunwind-1.1
+$ ./configure
+$ make
+$ sudo make install
+```
+
+### Compiling and installing gperftools
+
+```bash
+$ wget https://github.com/gperftools/gperftools/releases/download/gperftools-2.5/gperftools-2.5.tar.gz
+$ tar -xvf gperftools-2.5.tar.gz
+$ cd gperftools-2.5
+$ ./configure
+$ make
+$ sudo make install
+```
+
+## Building on Rocky 8
+
+To build Heron on a fresh Rocky 8 installation:
+
+### Step 1 --- Install the required dependencies
+
+```bash
+$ sudo yum install gcc gcc-c++ kernel-devel wget unzip zlib-devel zip git automake cmake patch libtool ant pkg-config -y
+```
+
+### Step 2 --- Install libunwind from source
+
+```bash
+$ wget http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz
+$ tar xvf libunwind-1.1.tar.gz
+$ cd libunwind-1.1
+$ ./configure
+$ make
+$ sudo make install
+```
+
+### Step 3 --- Set the following environment variables
+
+```bash
+$ export CC=/usr/bin/gcc
+$ export CXX=/usr/bin/g++
+```
+
+### Step 4 --- Install JDK 11
+
+```bash
+$ sudo yum install java-11-openjdk java-11-openjdk-devel
+$ export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
+```
+
+### Step 5 --- Install Bazelisk
+
+Bazelisk helps automate the management of Bazel versions
+
+```bash
+wget -O /tmp/bazelisk https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-linux-amd64
+chmod +x /tmp/bazelisk
+sudo mv /tmp/bazelisk /usr/local/bin/bazel
+```
+
+### Step 6 --- Fetch the latest version of Heron's source code
+
+```bash
+$ git clone https://github.com/apache/incubator-heron.git && cd incubator-heron
+```
+
+
+### Step 7 --- Configure Heron for building with Bazel
+
+```bash
+$ ./bazel_configure.py
+```
+
+### Step 8 --- Build the project
+
+```bash
+$ bazel build heron/...
+```
+
+This will build in the Bazel default `fastbuild` mode. Production release packages include additional performance optimizations not enabled by default. To enable production optimizations, include the `opt` flag. This defaults to optimization level `-O2`. The second option overrides the setting to bump it to `-O3`.
+
+```bash
+$ bazel build -c opt heron/...
+```
+
+```bash
+$ bazel build -c opt --copt=-O3 heron/...
+```
+
+If you wish to add the code syntax style check, add `--config=stylecheck`.
+
+### Step 9 --- Build the binary packages
+
+```bash
+$ bazel build scripts/packages:binpkgs
+$ bazel build scripts/packages:tarpkgs
+```
+
+This will install Heron packages in the `bazel-bin/scripts/packages/` directory.
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/compiling-osx.md b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-osx.md
new file mode 100644
index 00000000000..f04cf0de6e2
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-osx.md
@@ -0,0 +1,102 @@
+---
+id: version-0.20.5-incubating-compiling-osx
+title: Compiling on OS X
+sidebar_label: Compiling on OS X
+original_id: compiling-osx
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+This is a step-by-step guide to building Heron on macOS.
+
+### Step 1 --- Install Homebrew
+
+If [Homebrew](http://brew.sh/) isn't yet installed on your system, you can
+install it using this one-liner:
+
+```bash
+$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+```
+
+### Step 2 --- Install Bazelisk
+
+Bazelisk helps automate the management of Bazel versions
+
+```bash
+brew install bazelisk
+```
+
+### Step 3 --- Install other required libraries
+
+```bash
+brew install automake
+brew install cmake
+brew install libtool
+brew install ant
+brew install pkg-config
+```
+
+### Step 4 --- Set the following environment variables
+
+```bash
+$ export CC=/usr/bin/clang
+$ export CXX=/usr/bin/clang++
+$ echo $CC $CXX
+```
+
+### Step 5 --- Fetch the latest version of Heron's source code
+
+```bash
+$ git clone https://github.com/apache/incubator-heron.git && cd incubator-heron
+```
+
+### Step 6 --- Configure Heron for building with Bazel
+
+```bash
+$ ./bazel_configure.py
+```
+
+If this configure script fails with missing dependencies, Homebrew can be used
+to install those dependencies.
+
+### Step 7 --- Build the project
+
+```bash
+$ bazel build heron/...
+```
+
+This will build in the Bazel default `fastbuild` mode. Production release packages include additional performance optimizations not enabled by default. To enable production optimizations, include the `opt` flag. This defaults to optimization level `-O2`. The second option overrides the setting to bump it to `-O3`.
+
+```bash
+$ bazel build -c opt heron/...
+```
+
+```bash
+$ bazel build -c opt --copt=-O3 heron/...
+```
+
+If you wish to add the code syntax style check, add `--config=stylecheck`.
+
+### Step 8 --- Build the packages
+
+```bash
+$ bazel build scripts/packages:binpkgs
+$ bazel build scripts/packages:tarpkgs
+```
+
+This will install Heron packages in the `bazel-bin/scripts/packages/` directory.
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/compiling-overview.md b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-overview.md
new file mode 100644
index 00000000000..51e21b5c67b
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-overview.md
@@ -0,0 +1,135 @@
+---
+id: version-0.20.5-incubating-compiling-overview
+title: Compiling Heron
+sidebar_label: Compiling Overview
+original_id: compiling-overview
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron is currently available for [Mac OS X 12](compiling-osx),
+[Ubuntu 20.04](compiling-linux), and [debian11](compiling-docker#building-heron).
+ This guide describes the basics of the
+Heron build system. For step-by-step build instructions for other platforms,
+the following guides are available:
+
+* [Building on Linux Platforms](compiling-linux)
+* [Building on Mac OS X](compiling-osx)
+
+Heron can be built either [in its entirety](#building-all-components), as [individual components](#building-specific-components).
+
+Instructions on running unit tests for Heron can also be found in [Testing Heron](compiling-running-tests).
+
+## Requirements
+
+You must have the following installed to compile Heron:
+
+* [Bazel](http://bazel.io/docs/install.html) = {{% bazelVersion %}}. Later
+  versions might work but have not been tested. See [Installing Bazel](#installing-bazel) below.
+* [Java 11](https://www.oracle.com/java/technologies/javase-jdk11-downloads.html)
+  is required by Bazel and Heron;
+  topologies can be written in Java 7 or above,
+  but Heron jars are required to run with a Java 11 JRE.
+* [Autoconf](http://www.gnu.org/software/autoconf/autoconf.html) >=
+  2.6.3
+* [Automake](https://www.gnu.org/software/automake/) >= 1.11.1
+* [GNU Make](https://www.gnu.org/software/make/) >= 3.81
+* [GNU Libtool](http://www.gnu.org/software/libtool/) >= 2.4.6
+* [gcc/g++](https://gcc.gnu.org/) >= 4.8.1 (Linux platforms)
+* [CMake](https://cmake.org/) >= 2.6.4
+* [Python](https://www.python.org/) >= 3.8
+* [Perl](https://www.perl.org/) >= 5.8.8
+* [Ant](https://ant.apache.org/) >= 1.10.0
+* [Pkg-Config](https://www.freedesktop.org/wiki/Software/pkg-config/) >= 0.29.2
+
+Export the `CC` and `CXX` environment variables with a path specific to your
+machine:
+
+```bash
+$ export CC=/your-path-to/bin/c_compiler
+$ export CXX=/your-path-to/bin/c++_compiler
+$ echo $CC $CXX
+```
+
+## Installing Bazel
+
+Heron uses the [Bazel](http://bazel.io) build tool. Bazel releases can be found here:
+https://github.com/bazelbuild/bazel/releases/tag/{{% bazelVersion %}}
+and installation instructions can be found [here](http://bazel.io/docs/install.html).
+
+To ensure that Bazel has been installed, run `bazel version` and check the
+version (listed next to `Build label` in the script's output) to ensure that you
+have Bazel {{% bazelVersion %}}.
+
+## Configuring Bazel
+
+There is a Python script that you can run to configure Bazel on supported
+platforms:
+
+```bash
+$ cd /path/to/heron
+$ ./bazel_configure.py
+```
+
+## Building
+
+```bash
+$ bazel build heron/...
+```
+
+This will build in the Bazel default `fastbuild` mode. Production release packages include additional performance optimizations not enabled by default. To enable production optimizations, include the `opt` flag. This defaults to optimization level `-O2`. The second option overrides the setting to bump it to `-O3`.
+
+```bash
+$ bazel build -c opt heron/...
+```
+
+```bash
+$ bazel build -c opt --copt=-O3 heron/...
+```
+
+If you wish to add the code syntax style check, add `--config=stylecheck`.
+
+### Building All Components
+
+The Bazel build process can produce either executable install scripts or
+bundled tars. To build executables or tars for all Heron components at once,
+use the following `bazel build` commands, respectively:
+
+```bash
+$ bazel build scripts/packages:binpkgs
+$ bazel build scripts/packages:tarpkgs
+```
+
+Resulting artifacts can be found in subdirectories below the `bazel-bin`
+directory. The `heron-tracker` executable, for example, can be found at
+`bazel-bin/heron/tools/tracker/src/python/heron-tracker`.
+
+### Building Specific Components
+
+As an alternative to building a full release, you can build Heron executables
+for a single Heron component (such as the [Heron
+Tracker](user-manuals-heron-tracker-runbook)) by passing a target to the `bazel
+build` command. For example, the following command would build the Heron Tracker:
+
+```bash
+$ bazel build heron/tools/tracker/src/python:heron-tracker
+```
+
+## Testing Heron
+
+Instructions for running Heron unit tests can be found at [Testing
+Heron](compiling-running-tests).
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/compiling-running-tests.md b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-running-tests.md
new file mode 100644
index 00000000000..caaa17bc451
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/compiling-running-tests.md
@@ -0,0 +1,92 @@
+---
+id: version-0.20.5-incubating-compiling-running-tests
+title: Running Tests
+sidebar_label: Running Tests
+original_id: compiling-running-tests
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+Heron uses [Bazel](compiling-overview#installing-bazel) for building
+and running unit tests. Before running tests, first set up your build environment
+as described in [Compiling Heron](compiling-overview).
+
+### Running Unit Tests
+
+The following command will run all tests:
+
+```bash
+$ bazel test heron/...
+```
+
+To run a specific [test
+target](http://bazel.io/docs/test-encyclopedia.html), pass the test target name.
+
+```bash
+$ bazel test heron/statemgrs/tests/java:localfs-statemgr_unittest
+```
+
+### Discovering Unit Test Targets
+
+To see a full listing of all Bazel test targets:
+
+```bash
+$ bazel query 'kind(".*_test rule", ...)'
+```
+
+For **Java** targets only:
+
+```bash
+$ bazel query 'kind("java_test rule", ...)'
+```
+
+For **C++** targets:
+
+```bash
+$ bazel query 'kind("cc_test rule", ...)'
+```
+
+For **Python** targets:
+
+```bash
+$ bazel query 'kind("pex_test rule", ...)'
+```
+
+### Running Integration Tests
+
+Integration tests are divided into two categories:
+
+* Functional integration tests
+
+    These integration tests are designed for testing the functionality of 
+    Heron, such as topologies and groupings.
+    To run the functional integration tests on a Mac OS X, do the following:
+
+    ```bash
+    $ ./scripts/run_integration_test.sh
+    ```
+
+* Failure integration tests
+
+    These integration tests are designed for testing recovery from failure/restart
+    in certain processes, such as Topology Manager and Metrics Manager.
+    To run the failure integration tests on a Mac OS X, do the following:
+
+    ```bash
+    $ bazel build integration_test/src/...
+    $ ./bazel-bin/integration_test/src/python/local_test_runner/local-test-runner
+    ```
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/getting-started-docker.md b/website2/website/versioned_docs/version-0.20.5-incubating/getting-started-docker.md
new file mode 100644
index 00000000000..67e3ad94769
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/getting-started-docker.md
@@ -0,0 +1,50 @@
+---
+id: version-0.20.5-incubating-getting-started-docker
+title: The official Apache Heron Docker Image(s)
+sidebar_label: Heron & Docker
+original_id: getting-started-docker
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> The current version of Heron is {{heron:version}}
+
+The official Apache Heron Docker image is located at the link below
+
+<a target="_blank" href="https://hub.docker.com/repository/docker/apache/heron">https://hub.docker.com/repository/docker/apache/heron</a>
+
+### Docker Quickstart
+In one terminal execute to start Heron in a container
+
+```bash
+$ docker run -it  --rm \
+   -p 8889:8889 \
+   -p 8888:8888 \
+   --name local-heron \
+   apache/heron:0.20.4-incubating supervisord --nodaemon
+```
+In another terminal execute the following to deploy a job:
+```bash
+$ docker exec -it \
+   local-heron \
+   bash -c "heron submit sandbox  /heron/examples/heron-eco-examples.jar org.apache.heron.eco.Eco --eco-config-file /heron/examples/heron_wordcount.yaml"
+```
+
+View your job details by navigating to `localhost:8889` in your browser.  Congratulations, you've just deployed a Heron job in Docker!
+
+
+
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/getting-started-local-single-node.md b/website2/website/versioned_docs/version-0.20.5-incubating/getting-started-local-single-node.md
new file mode 100644
index 00000000000..176582f6a61
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/getting-started-local-single-node.md
@@ -0,0 +1,259 @@
+---
+id: version-0.20.5-incubating-getting-started-local-single-node
+title: Local (Single Node)
+sidebar_label: Local (Single Node)
+original_id: getting-started-local-single-node
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+> The current version of Heron is {{heron:version}}
+For other platforms, you need to build from source. Please refer to the [guide to compiling Heron](compiling-overview).
+
+## Step 1 --- Download the Heron tools
+
+Heron tools can be installed using [installation scripts](#using-installation-scripts).
+
+> Note: As of version 0.20.4-incubating, there is a python compatibility issue on OSX.
+> The supported platforms are Rocky 8, Debian 11, and Ubuntu 18.04.
+
+## Using installation scripts
+
+To install Heron binaries directly, using installation scripts, go to Heron's [download page](https://heron.apache.org/download)
+and see a full listing of Heron releases for each available platform. The installation script for Debian 11, for example, is named
+`heron-install-{{% heronVersion %}}-debian11.sh`.
+
+Once you've downloaded the script, make it executable using [chmod](https://en.wikipedia.org/wiki/Chmod):
+
+```bash
+$ chmod +x heron-*.sh
+```
+
+Now run the [Heron client](user-manuals-heron-cli) installation script with the `--user` flag set. Here's an example:
+
+> The script will install executables in the `~/bin` folder. You should add that folder to your `PATH` using `export PATH=~/bin:$PATH`.
+
+```bash
+$ ./heron-install-{{% heronVersion %}}-debian11.sh --user
+Heron installer
+---------------
+
+Uncompressing...done
+...
+Heron is now installed!
+```
+
+To check that Heron is successfully installed, run `heron version`:
+
+```bash
+$ heron version
+heron.build.git.revision : b580f689d8cbcb3026966bde2aacf6da74d5bcf5
+heron.build.git.status : Modified
+heron.build.host : MBP
+heron.build.time : Sun Mar 22 06:42:05 CDT 2020
+heron.build.timestamp : 1584877325000
+heron.build.user : ...
+heron.build.version : update-docs-compiling
+```
+
+## Step 2 --- Launch an example topology
+
+> #### Note for macOS users
+
+> If you want to run topologies locally on macOS, you may need to add your
+> hostname to your `/etc/hosts` file under `localhost`. Here's an example line:
+> `127.0.0.1 localhost My-Mac-Laptop.local`. You can fetch your hostname by simply
+> running `hostname` in your shell.
+
+If you set the `--user` flag when running the installation scripts, some example
+topologies will be installed in your `~/.heron/examples` directory. You can
+launch an example [topology](heron-topology-concepts) locally (on your machine)
+using the [Heron CLI tool](user-manuals-heron-cli):
+
+```bash
+$ heron submit local \
+  ~/.heron/examples/heron-streamlet-examples.jar \
+  org.apache.heron.examples.streamlet.WindowedWordCountTopology \
+  WindowedWordCountTopology \
+  --deploy-deactivated
+```
+
+The output should look something like this:
+
+```bash
+INFO: Launching topology 'WindowedWordCountTopology'
+
+...
+
+INFO: Topology 'WindowedWordCountTopology' launched successfully
+INFO: Elapsed time: 3.409s.
+```
+
+This will *submit* the topology to your locally running Heron cluster but it
+won't *activate* the topology because the `--deploy-deactivated` flag was set.
+Activating the topology will be explored in [step
+5](#step-5-explore-topology-management-commands) below.
+
+Note that the output shows whether the topology has been launched successfully as well
+as the working directory for the topology.
+
+To check what's under the working directory, run:
+
+```bash
+$ ls -al ~/.herondata/topologies/local/${ROLE}/WindowedWordCountTopology
+-rw-r--r--   1 username  staff     6141 Oct 12 09:58 WindowedWordCountTopology.defn
+-rw-r--r--   1 username  staff        5 Oct 12 09:58 container_1_flatmap1_4.pid
+-rw-r--r--   1 username  staff        5 Oct 12 09:58 container_1_logger1_3.pid
+# etc.
+```
+
+All instances' log files can be found in `log-files` under the working directory:
+
+```bash
+$ ls -al ~/.herondata/topologies/local/${ROLE}/WindowedWordCountTopology/log-files
+total 408
+-rw-r--r--   1 username  staff   5055 Oct 12 09:58 container_1_flatmap1_4.log.0
+-rw-r--r--   1 username  staff      0 Oct 12 09:58 container_1_flatmap1_4.log.0.lck
+-rw-r--r--   1 username  staff   5052 Oct 12 09:58 container_1_logger1_3.log.0
+# etc.
+```
+
+## Step 3 --- Start Heron Tracker
+
+The [Heron Tracker](user-manuals-heron-tracker-runbook) is a web service that
+continuously gathers information about your Heron cluster. You can launch the
+tracker by running the `heron-tracker` command (which is already installed):
+
+```bash
+$ heron-tracker
+... Running on port: 8888
+... Using config file: $HOME/.herontools/conf/heron_tracker.yaml
+```
+
+You can reach Heron Tracker in your browser at [http://localhost:8888](http://localhost:8888)
+and see something like the following upon successful submission of the topology:
+![Heron Tracker](assets/heron-tracker.png)
+
+To explore Heron Tracker, please refer to [Heron Tracker Rest API](user-manuals-tracker-rest)
+
+## Step 4 --- Start Heron UI
+
+[Heron UI](user-manuals-heron-ui-runbook) is a user interface that uses Heron Tracker to
+provide detailed visual representations of your Heron topologies. To launch
+Heron UI:
+
+```bash
+$ heron-ui
+... Running on port: 8889
+... Using tracker url: http://localhost:8888
+```
+
+You can open Heron UI in your browser at [http://localhost:8889](http://localhost:8889)
+and see something like this upon successful submission of the topology:
+![Heron UI](assets/heron-ui.png)
+
+To play with Heron UI, please refer to [Heron UI Usage Guide](guides-ui-guide)
+
+## Step 5 --- Explore topology management commands
+
+In step 2 you submitted a topology to your local cluster. The `heron` CLI tool
+also enables you to activate, deactivate, and kill topologies and more.
+
+```bash
+$ heron activate local WindowedWordCountTopology
+$ heron deactivate local WindowedWordCountTopology
+$ heron kill local WindowedWordCountTopology
+```
+
+Upon successful actions, a message similar to the following will appear:
+
+```bash
+INFO: Successfully activated topology 'WindowedWordCountTopology'
+INFO: Elapsed time: 1.980s.
+```
+
+For more info on these commands, read about [topology
+lifecycles](heron-topology-concepts#topology-lifecycle).
+
+To list the available CLI commands, run `heron` by itself:
+
+```bash
+usage: heron <command> <options> ...
+
+Available commands:
+    activate           Activate a topology
+    deactivate         Deactivate a topology
+    help               Prints help for commands
+    kill               Kill a topology
+    restart            Restart a topology
+    submit             Submit a topology
+    version            Print version of heron-cli
+
+For detailed documentation, go to https://heron.apache.org
+```
+
+To invoke help output for a command, run `heron help COMMAND`. Here's an
+example:
+
+```bash
+$ heron help submit
+usage: heron submit [options] cluster/[role]/[environ] topology-file-name topology-class-name [topology-args]
+
+Required arguments:
+  cluster/[role]/[env]  Cluster, role, and environ to run topology
+  topology-file-name    Topology jar/tar/zip file
+  topology-class-name   Topology class name
+
+Optional arguments:
+  --config-path (a string; path to cluster config; default: "$HOME/.heron/conf")
+  --config-property (key=value; a config key and its value; default: [])
+  --deploy-deactivated (a boolean; default: "false")
+  -D DEFINE             Define a system property to pass to java -D when
+                        running main.
+  --verbose (a boolean; default: "false")
+```
+
+## Step 6 --- Explore other example topologies
+
+The source code for the example topologies can be found
+[on
+GitHub](https://github.com/apache/incubator-heron/tree/master/examples/src/java/org/apache/heron/examples).
+The included example topologies:
+
+* `AckingTopology.java` --- A topology with acking enabled.
+* `ComponentJVMOptionsTopology.java` --- A topology that supplies JVM options
+  for each component.
+* `CustomGroupingTopology.java` --- A topology that implements custom grouping.
+* `ExclamationTopology.java` --- A spout that emits random words to a bolt that
+  then adds an exclamation mark.
+* `MultiSpoutExclamationTopology.java` --- a topology with multiple spouts.
+* `MultiStageAckingTopology.java` --- A three-stage topology. A spout emits to a
+  bolt that then feeds to another bolt.
+* `TaskHookTopology.java` --- A topology that uses a task hook to subscribe to
+   event notifications.
+
+## Troubleshooting
+In case of any issues, please refer to [Quick Start Troubleshooting](getting-started-troubleshooting-guide).
+
+### Next Steps
+
+* [Migrate Storm topologies](getting-started-migrate-storm-topologies) to Heron with simple `pom.xml`
+  changes
+* [Deploy topologies](deployment-overview) in clustered, scheduler-driven
+  environments (such as on [Aurora](schedulers-aurora-cluster)
+  and [locally](schedulers-local))
+* [Develop topologies](heron-architecture) for Heron
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/schedulers-k8s-execution-environment.md b/website2/website/versioned_docs/version-0.20.5-incubating/schedulers-k8s-execution-environment.md
new file mode 100644
index 00000000000..80ea152bb80
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/schedulers-k8s-execution-environment.md
@@ -0,0 +1,738 @@
+---
+id: version-0.20.5-incubating-schedulers-k8s-execution-environment
+title: Kubernetes Execution Environment Customization
+hide_title: true
+sidebar_label: Kubernetes Environment Customization
+original_id: schedulers-k8s-execution-environment
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+# Customizing the Heron Execution Environment in Kubernetes
+
+This document demonstrates how you can customize various aspects of the Heron execution environment when using the Kubernetes Scheduler.
+
+<br>
+
+---
+
+<br>
+
+## Customizing a Topology's Execution Environment Using Pod Templates
+
+<br>
+
+> This section demonstrates how you can utilize custom [Pod Templates](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates) embedded in [Configuration Maps](https://kubernetes.io/docs/concepts/configuration/configmap/) for your Topology's `Executor`s and `Manager` (hereinafter referred to as `Heron containers`). You may specify different Pod Templates for different topologies.
+
+<br/>
+
+When you deploy a topology to Heron on Kubernetes, you may specify individual Pod Templates to be used in your topology's `Executor`s and `Manager`. This can be achieved by providing valid Pod Templates, and embedding the Pod Templates in Configuration Maps. By default, Heron will use a minimally configured Pod Template which is adequate to deploy a topology.
+
+Pod Templates will allow you to configure most aspects of your topology's execution environment, with some exceptions. There are some aspects of Pods for which Heron will have the final say, and which will not be user-customizable. Please view the [tables](#heron-configured-items-in-pod-templates) at the end of this section to identify what is set by Heron.
+
+<br>
+
+> ***System Administrators:***
+>
+> * You may wish to disable the ability to load custom Pod Templates. To achieve this, you must pass the define option `-D heron.kubernetes.pod.template.disabled=true` to the Heron API Server on the command line when launching. This command has been added to the Kubernetes configuration files to deploy the Heron API Server and is set to `false` by default.
+> * If you have a custom `Role` for the Heron API Server you will need to ensure the `ServiceAccount` attached to the API server, via a `RoleBinding`, has the correct permissions to access the `ConfigMaps`:
+>
+>```yaml
+>rules:
+>- apiGroups: 
+>  - ""
+>  resources: 
+>  - configmaps
+>  verbs: 
+>  - get
+>  - list
+>```
+
+<br>
+
+### Preparation
+
+To deploy a custom Pod Template to Kubernetes with your topology, you must provide a valid Pod Template embedded in a valid Configuration Map. We will be using the following variables throughout this document, some of which are reserved variable names:
+
+* `POD-TEMPLATE-NAME`: This is the name of the Pod Template's YAML definition file. This is ***not*** a reserved variable and is a place-holder name.
+* `CONFIG-MAP-NAME`: This is the name that will be used by the Configuration Map in which the Pod Template will be embedded by `kubectl`. This is ***not*** a reserved variable and is a place-holder name.
+* `heron.kubernetes.[executor | manager].pod.template`: This variable name is used as the key passed to Heron for the `--config-property` on the CLI. This ***is*** a reserved variable name.
+
+***NOTE***: Please do ***not*** use the `.` (period character) in the name of the `CONFIG-MAP-NAME`. This character will be used as a delimiter when submitting your topologies.
+
+It is highly advised that you validate your Pod Templates before placing them in a `ConfigMap` to isolate any validity issues using a tool such as [Kubeval](https://kubeval.instrumenta.dev/) or the built-in `dry-run` functionality in Kubernetes. Whilst these tools are handy, they will not catch all potential errors in Kubernetes configurations.
+
+***NOTE***: When submitting a Pod Template to customize an `Executor` or `Manager`, Heron will look for containers named `executor` and `manager` respectively. These containers will be modified to support the functioning of Heron, please read further below.
+
+#### Pod Templates
+
+An example of the Pod Template format is provided below, and is derived from the configuration for the Heron Tracker Pod:
+
+```yaml
+apiVersion: v1
+kind: PodTemplate
+metadata:
+  name: heron-tracker
+  namespace: default
+template:
+  metadata:
+    labels:
+      app: heron-tracker
+  spec:
+    containers:
+      - name: heron-tracker
+        image: apache/heron:latest
+        ports:
+          - containerPort: 8888
+            name: api-port
+        resources:
+          requests:
+            cpu: "100m"
+            memory: "200M"
+          limits:
+            cpu: "400m"
+            memory: "512M"
+```
+
+You would need to save this file as `POD-TEMPLATE-NAME`. Once you have a valid Pod Template you may proceed to generate a `ConfigMap`.
+
+#### Configuration Maps
+
+> You must place the `ConfigMap` in the same namespace as the Heron API Server using the `--namespace` option in the commands below if the API Server is not in the `default` namespace.
+
+To generate a `ConfigMap` you will need to run the following command:
+
+```bash
+kubectl create configmap CONFIG-MAP-NAME --from-file path/to/POD-TEMPLATE-NAME
+```
+
+You may then want to verify the contents of the `ConfigMap` by running the following command:
+
+```bash
+kubectl get configmaps CONFIG-MAP-NAME -o yaml
+```
+
+The `ConfigMap` should appear similar to the one below for our example:
+
+```yaml
+apiVersion: v1
+data:
+  POD-TEMPLATE-NAME: |
+    apiVersion: v1
+    kind: PodTemplate
+    metadata:
+      name: heron-tracker
+      namespace: default
+    template:
+      metadata:
+        labels:
+          app: heron-tracker
+      spec:
+        containers:
+          - name: heron-tracker
+            image: apache/heron:latest
+            ports:
+              - containerPort: 8888
+                name: api-port
+            resources:
+              requests:
+                cpu: "100m"
+                memory: "200M"
+              limits:
+                cpu: "400m"
+                memory: "512M"
+kind: ConfigMap
+metadata:
+  creationTimestamp: "2021-09-27T21:55:30Z"
+  name: CONFIG-MAP-NAME
+  namespace: default
+  resourceVersion: "1313"
+  uid: ba001653-03d9-4ac8-804c-d2c55c974281
+```
+
+### Submitting
+
+To use the `ConfigMap` for a topology you will need to submit with the additional flag `--config-property`. The `--config-property key=value` takes a key-value pair:
+
+* Key: `heron.kubernetes.[executor | manager].pod.template`
+* Value: `CONFIG-MAP-NAME.POD-TEMPLATE-NAME`
+
+Please note that you must concatenate `CONFIG-MAP-NAME` and `POD-TEMPLATE-NAME` with a **`.`** (period character).
+
+For example:
+
+```bash
+heron submit kubernetes \
+  --service-url=http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy \
+  ~/.heron/examples/heron-api-examples.jar \
+  org.apache.heron.examples.api.AckingTopology acking \
+  --config-property heron.kubernetes.executor.pod.template=CONFIG-MAP-NAME.POD-TEMPLATE-NAME \
+  --config-property heron.kubernetes.manager.pod.template=CONFIG-MAP-NAME.POD-TEMPLATE-NAME
+```
+
+### Heron Configured Items in Pod Templates
+
+Heron will locate the containers named `executor` and/or `manager` in the Pod Template and customize them as outlined below. All other containers within the Pod Templates will remain unchanged.
+
+#### Executor and Manager Containers
+
+All metadata for the `Heron containers` will be overwritten by Heron. In some other cases, values from the Pod Template for the `executor` and `manager` will be overwritten by Heron as outlined below.
+
+| Name | Description | Policy |
+|---|---|---|
+| `image` | The `Heron container`'s image. | Overwritten by Heron using values from the config.
+| `env` | Environment variables are made available within the container. The `HOST` and `POD_NAME` keys are required by Heron and are thus reserved. | Merged with Heron's values taking precedence. Deduplication is based on `name`.
+| `ports` | Port numbers opened within the container. Some of these port numbers are required by Heron and are thus reserved. The reserved ports are defined in Heron's constants as [`6001`-`6010`]. | Merged with Heron's values taking precedence. Deduplication is based on the `containerPort` value.
+| `limits` <br> `requests` | Heron will attempt to load values for `cpu` and `memory` from configs. | Heron's values take precedence over those in the Pod Templates.
+| `volumeMounts` | These are the mount points within the `Heron container` for the `volumes` available in the Pod. | Merged with Heron's values taking precedence. Deduplication is based on the `name` value.
+| Annotation: `prometheus.io/scrape` | Flag to indicate whether Prometheus logs can be scraped and is set to `true`. | Value is overridden by Heron. |
+| Annotation `prometheus.io/port` | Port address for Prometheus log scraping and is set to `8080`. | Values are overridden by Heron.
+| Annotation: Pod | Pod's revision/version hash. | Automatically set.
+| Annotation: Service | Labels services can use to attach to the Pod. | Automatically set.
+| Label: `app` | Name of the application launching the Pod and is set to `Heron`. | Values are overridden by Heron.
+| Label: `topology`| The name of topology which was provided when submitting. | User-defined and supplied on the CLI.
+
+#### Pod
+
+The following items will be set in the Pod Template's `spec` by Heron.
+
+| Name | Description | Policy |
+|---|---|---|
+`terminationGracePeriodSeconds` | Grace period to wait before shutting down the Pod after a `SIGTERM` signal and is set to `0` seconds. | Values are overridden by Heron.
+| `tolerations` | Attempts to schedule Pods with `taints` onto nodes hosting Pods with matching `taints`. The entries below are included by default. <br>  Keys:<br>`node.kubernetes.io/not-ready` <br> `node.kubernetes.io/unreachable` <br> Values (common):<br> `operator: Exists`<br> `effect: NoExecute`<br> `tolerationSeconds: 10L` | Merged with Heron's values taking precedence. Deduplication is based on the `key` value.
+| `containers` | Configurations for containers to be launched within the Pod. | All containers, excluding the `Heron container`s, are loaded as-is.
+| `volumes` | Volumes to be made available to the entire Pod. | Merged with Heron's values taking precedence. Deduplication is based on the `name` value.
+| `secretVolumes` | Secrets to be mounted as volumes within the Pod. | Loaded from the Heron configs if present.
+
+<br>
+
+---
+<br>
+
+## Adding Persistent Volumes via the Command-line Interface
+
+<br>
+
+> This section demonstrates how you can utilize both static and dynamically backed [Persistent Volume Claims](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) in the `Executor` and `Manager` containers (hereinafter referred to as `Heron containers`). You will need to enable Dynamic Provisioning in your Kubernetes cluster to proceed to use the dynamic provisioning functionality.
+
+<br/>
+
+It is possible to leverage Persistent Volumes with custom Pod Templates but the Volumes you add will be shared between all `Executor` Pods in the topology when customizing the `Executor`s.
+
+The CLI commands allow you to configure a Persistent Volume Claim (dynamically or statically backed) which will be unique and isolated to each Pod and mounted in a single `Heron container` when you submit your topology with a claim name of `OnDemand`. Using any claim name other than `OnDemand` will permit you to configure a shared Persistent Volume without a custom Pod Template which will be shared between all `Executor` Pods when customizing them. The CLI commands override any config [...]
+
+Some use cases include process checkpointing, caching of results for later use in the process, intermediate results which could prove useful in analysis (ETL/ELT to a data lake or warehouse), as a source of data enrichment, etc.
+
+**Note:** Heron ***will*** remove any dynamically backed Persistent Volume Claims it creates when a topology is terminated. Please be aware that Heron uses the following `Labels` to locate the claims it has created:
+```yaml
+metadata:
+  labels:
+    topology: <topology-name>
+    onDemand: true
+```
+
+<br>
+
+> ***System Administrators:***
+>
+> * You may wish to disable the ability to configure Persistent Volume Claims specified via the CLI. To achieve this, you must pass the define option `-D heron.kubernetes.volume.from.cli.disabled=true` to the Heron API Server on the command line when launching. This command has been added to the Kubernetes configuration files to deploy the Heron API Server and is set to `false` by default.
+> * If you have a custom `Role`/`ClusterRole` for the Heron API Server you will need to ensure the `ServiceAccount` attached to the API server has the correct permissions to access the `Persistent Volume Claim`s:
+>
+>```yaml
+>rules:
+>- apiGroups: 
+>  - ""
+>  resources: 
+>  - persistentvolumeclaims
+>  verbs: 
+>  - create
+>  - delete
+>  - get
+>  - list
+>  - deletecollection
+>```
+
+<br>
+
+### Usage
+
+To configure a Persistent Volume Claim you must use the `--config-property` option with the `heron.kubernetes.[executor | manager].volumes.persistentVolumeClaim.` command prefix. Heron will not validate your Persistent Volume Claim configurations, so please validate them to ensure they are well-formed. All names must comply with the [*lowercase RFC-1123*](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/) standard.
+
+The command pattern is as follows:
+`heron.kubernetes.[executor | manager].volumes.persistentVolumeClaim.[VOLUME NAME].[OPTION]=[VALUE]`
+
+The currently supported CLI `options` are:
+
+* `claimName`
+* `storageClass`
+* `sizeLimit`
+* `accessModes`
+* `volumeMode`
+* `path`
+* `subPath`
+* `readOnly`
+
+***Note:*** A `claimName` of `OnDemand` will create unique Volumes for each `Heron container` as well as deploy a Persistent Volume Claim for each Volume. Any other claim name will result in a shared Volume being created between all Pods in the topology.
+
+***Note:*** The `accessModes` must be a comma-separated list of values *without* any white space. Valid values can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).
+
+***Note:*** If a `storageClassName` is specified and there are no matching Persistent Volumes then [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) must be enabled. Kubernetes will attempt to locate a Persistent Volume that matches the `storageClassName` before it attempts to use dynamic provisioning. If a `storageClassName` is not specified there must be [Persistent Volumes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-persi [...]
+
+<br>
+
+#### Example
+
+A series of example commands to add `Persistent Volumes` to `Executor`s, and the `YAML` entries they make in their respective configurations, are as follows.
+
+***Dynamic:***
+
+```bash
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.claimName=OnDemand
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.storageClassName=storage-class-name-of-choice
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.accessModes=comma,separated,list
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.sizeLimit=555Gi
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.volumeMode=volume-mode-of-choice
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.path=/path/to/mount
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.subPath=/sub/path/to/mount
+```
+
+Generated `Persistent Volume Claim`:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  labels:
+    app: heron
+    onDemand: "true"
+    topology: <topology-name>
+  name: volumenameofchoice-<topology-name>-[Ordinal]
+spec:
+  accessModes:
+  - comma
+  - separated
+  - list
+  resources:
+    requests:
+      storage: 555Gi
+  storageClassName: storage-class-name-of-choice
+  volumeMode: volume-mode-of-choice
+```
+
+Pod Spec entries for `Volume`:
+
+```yaml
+volumes:
+  - name: volumenameofchoice
+    persistentVolumeClaim:
+      claimName: volumenameofchoice-<topology-name>-[Ordinal]
+```
+
+`Executor` container entries for `Volume Mounts`:
+
+```yaml
+volumeMounts:
+  - mountPath: /path/to/mount
+    subPath: /sub/path/to/mount
+    name: volumenameofchoice
+```
+
+<br>
+
+***Static:***
+
+```bash
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.claimName=OnDemand
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.accessModes=comma,separated,list
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.sizeLimit=555Gi
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.volumeMode=volume-mode-of-choice
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.path=/path/to/mount
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.subPath=/sub/path/to/mount
+```
+
+Generated `Persistent Volume Claim`:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  labels:
+    app: heron
+    onDemand: "true"
+    topology: <topology-name>
+  name: volumenameofchoice-<topology-name>-[Ordinal]
+spec:
+  accessModes:
+  - comma
+  - separated
+  - list
+  resources:
+    requests:
+      storage: 555Gi
+  storageClassName: ""
+  volumeMode: volume-mode-of-choice
+```
+
+Pod Spec entries for `Volume`:
+
+```yaml
+volumes:
+  - name: volumenameofchoice
+    persistentVolumeClaim:
+      claimName: volumenameofchoice-<topology-name>-[Ordinal]
+```
+
+`Executor` container entries for `Volume Mounts`:
+
+```yaml
+volumeMounts:
+  - mountPath: /path/to/mount
+    subPath: /sub/path/to/mount
+    name: volumenameofchoice
+```
+
+<br>
+
+### Submitting
+
+A series of example commands to submit a topology using the *dynamic* example CLI commands above:
+
+```bash
+heron submit kubernetes \
+  --service-url=http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy \
+  ~/.heron/examples/heron-api-examples.jar \
+  org.apache.heron.examples.api.AckingTopology acking \
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.claimName=OnDemand \
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.storageClassName=storage-class-name-of-choice \
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.accessModes=comma,separated,list \
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.sizeLimit=555Gi \
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.volumeMode=volume-mode-of-choice \
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.path=/path/to/mount \
+--config-property heron.kubernetes.executor.volumes.persistentVolumeClaim.volumenameofchoice.subPath=/sub/path/to/mount
+```
+
+### Required and Optional Configuration Items
+
+The following table outlines CLI options which are either ***required*** ( &#x2705; ), ***optional*** ( &#x2754; ), or ***not available*** ( &#x274c; ) depending on if you are using dynamically/statically backed or shared `Volume`s.
+
+| Option | Dynamic | Static | Shared
+|---|---|---|---|
+| `VOLUME NAME` | &#x2705; | &#x2705; | &#x2705;
+| `claimName` | `OnDemand` | `OnDemand` | A valid name
+| `path` | &#x2705; | &#x2705; | &#x2705;
+| `subPath` | &#x2754; | &#x2754; | &#x2754;
+| `storageClassName` | &#x2705; | &#x274c; | &#x274c;
+| `accessModes` | &#x2705; | &#x2705; | &#x274c;
+| `sizeLimit` | &#x2754; | &#x2754; | &#x274c;
+| `volumeMode` | &#x2754; | &#x2754; | &#x274c;
+| `readOnly` | &#x2754; | &#x2754; | &#x2754;
+
+<br>
+
+***Note:*** The `VOLUME NAME` will be extracted from the CLI command and a `claimName` is always required.
+
+<br>
+
+### Configuration Items Created and Entries Made
+
+The configuration items and entries in the tables below will be made in their respective areas.
+
+A `Volume` and a `Volume Mount` will be created for each `volume name` which you specify. Additionally, one `Persistent Volume Claim` will be created for each `Volume` specified as dynamic using the `OnDemand` claim name.
+
+| Name | Description | Policy |
+|---|---|---|
+| `VOLUME NAME` | The `name` of the `Volume`. | Entries made in the `Persistent Volume Claim`'s spec, the Pod Spec's `Volumes`, and the `Heron container`'s `volumeMounts`.
+| `claimName` | A Claim name for the Persistent Volume. | If `OnDemand` is provided as the parameter then a unique Volume and Persistent Volume Claim will be created. Any other name will result in a shared Volume between all Pods in the topology with only a Volume and Volume Mount being added.
+| `path` | The `mountPath` of the `Volume`. | Entries made in the `Heron container`'s `volumeMounts`.
+| `subPath` | The `subPath` of the `Volume`. | Entries made in the `Heron container`'s `volumeMounts`.
+| `storageClassName` | The identifier name used to reference the dynamic `StorageClass`. | Entries made in the `Persistent Volume Claim` and Pod Spec's `Volume`.
+| `accessModes` | A comma-separated list of [access modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes). | Entries made in the `Persistent Volume Claim`.
+| `sizeLimit` | A resource request for storage space [units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory). | Entries made in the `Persistent Volume Claim`.
+| `volumeMode` | Either `FileSystem` (default) or `Block` (raw block). [Read more](https://kubernetes.io/docs/concepts/storage/_print/#volume-mode). | Entries made in the `Persistent Volume Claim`.
+| Labels | Two labels for `topology` and `onDemand` provisioning are added. | These labels are only added to dynamically backed `Persistent Volume Claim`s created by Heron to support the removal of any claims created when a topology is terminated.
+
+<br>
+
+---
+
+<br>
+
+## Adding Empty Directory, Host Path, and Network File System Volumes via the Command-line Interface
+
+<br>
+
+> This section demonstrates how you can specify configurations for `Empty Dir`, `Host Path`, and `NFS` volumes via the Command Line Interface during the submit process.
+
+<br/>
+
+It is possible to allocate and configure Volumes with Pod Templates but the CLI commands extend this to being able to specify Volumes at submission time.
+
+<br>
+
+> ***System Administrators:***
+>
+> * You may wish to disable the ability to configure Volume configurations specified via the CLI. To achieve this, you must pass the define option `-D heron.kubernetes.volume.from.cli.disabled=true` to the Heron API Server on the command line when launching. This command has been added to the Kubernetes configuration files to deploy the Heron API Server and is set to `false` by default.
+> * &#x26a0; ***WARNING*** &#x26a0; `Host Path` volumes have inherent [security concerns](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath). `Host Path`s can breach the containment provided by containerization and should be exclusively used with volume mounts set to `read-only`, with usage limited to testing and development environments.
+
+<br>
+
+### Usage
+
+To configure a Volume on the CLI you must use the `--config-property` option in combination with the following prefixes:
+
+ * [Empty Directory](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir): `heron.kubernetes.[executor | manager].volumes.emptyDir.`
+ * [Host Path](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath): `heron.kubernetes.[executor | manager].volumes.hostPath.`
+ * [Network File System](https://kubernetes.io/docs/concepts/storage/volumes/#nfs): `heron.kubernetes.[executor | manager].volumes.nfs.`
+
+ Heron will not validate your Volume configurations, so please validate them to ensure they are well-formed. All Volume names must comply with the [*lowercase RFC-1123*](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/) standard.
+
+The command patterns are as follows:
+
+ * Empty Directory: `heron.kubernetes.[executor | manager].volumes.emptyDir.[VOLUME NAME].[OPTION]=[VALUE]`
+ * Host Path: `heron.kubernetes.[executor | manager].volumes.hostPath.[VOLUME NAME].[OPTION]=[VALUE]`
+ * Network File System: `heron.kubernetes.[executor | manager].volumes.nfs.[VOLUME NAME].[OPTION]=[VALUE]`
+
+The currently supported CLI `options` are:
+
+* `medium`
+* `type`
+* `server`
+* `sizeLimit`
+* `pathOnHost`
+* `pathOnNFS`
+* `path`
+* `subPath`
+* `readOnly`
+
+<br>
+
+#### Example
+
+A series of example commands to add Volumes to a `Manager`, and the `YAML` entries they make in their respective configurations, are as follows.
+
+***Empty Directory:***
+
+```bash
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.medium="Memory"
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.sizeLimit="50Mi"
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.path="empty/dir/path"
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.subPath="empty/dir/sub/path"
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.readOnly="true"
+```
+
+Generated `Volume` entry:
+
+```yaml
+volumes:
+- emptyDir:
+    medium: Memory
+    sizeLimit: 50Mi
+  name: manager-empty-dir
+```
+
+Generated `Volume Mount` entry:
+
+```yaml
+volumeMounts:
+- mountPath: empty/dir/path
+  name: manager-empty-dir
+  readOnly: true
+  subPath: empty/dir/sub/path
+```
+
+<br>
+
+***Host Path:***
+
+```bash
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.type="File"
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.pathOnHost="/dev/null"
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.path="host/path/path"
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.subPath="host/path/sub/path"
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.readOnly="true"
+```
+
+Generated `Volume` entry:
+
+```yaml
+volumes:
+- hostPath:
+    path: /dev/null
+    type: File
+  name: manager-host-path
+```
+
+Generated `Volume Mount` entry:
+
+```yaml
+volumeMounts:
+- mountPath: host/path/path
+  name: manager-host-path
+  readOnly: true
+  subPath: host/path/sub/path
+```
+
+<br>
+
+***NFS:***
+
+```bash
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.server="nfs-server.address"
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.readOnly="true"
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.pathOnNFS="/dev/null"
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.path="nfs/path"
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.subPath="nfs/sub/path"
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.readOnly="true"
+```
+
+Generated `Volume` entry:
+
+```yaml
+volumes:
+- name: manager-nfs
+  nfs:
+    path: /dev/null
+    readOnly: true
+    server: nfs-server.address
+```
+
+Generated `Volume Mount` entry:
+
+```yaml
+volumeMounts:
+- mountPath: nfs/path
+  name: manager-nfs
+  readOnly: true
+  subPath: nfs/sub/path
+```
+
+<br>
+
+### Submitting
+
+A series of example commands to submit a topology using the example CLI commands above:
+
+```bash
+heron submit kubernetes \
+  --service-url=http://localhost:8001/api/v1/namespaces/default/services/heron-apiserver:9000/proxy \
+  ~/.heron/examples/heron-api-examples.jar \
+  org.apache.heron.examples.api.AckingTopology acking \
+\
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.medium="Memory" \
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.sizeLimit="50Mi" \
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.path="empty/dir/path" \
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.subPath="empty/dir/sub/path" \
+--config-property heron.kubernetes.manager.volumes.emptyDir.manager-empty-dir.readOnly="true" \
+\
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.type="File" \
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.pathOnHost="/dev/null" \
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.path="host/path/path" \
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.subPath="host/path/sub/path" \
+--config-property heron.kubernetes.manager.volumes.hostPath.manager-host-path.readOnly="true" \
+\
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.server="nfs-server.address" \
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.readOnly="true" \
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.pathOnNFS="/dev/null" \
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.path="nfs/path" \
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.subPath="nfs/sub/path" \
+--config-property heron.kubernetes.manager.volumes.nfs.manager-nfs.readOnly="true"
+```
+
+### Required and Optional Configuration Items
+
+The following table outlines CLI options which are either ***required*** ( &#x2705; ), ***optional*** ( &#x2754; ), or ***not available*** ( &#x274c; ) depending on the type of `Volume`.
+
+| Option | emptyDir | hostPath | NFS
+|---|---|---|---|
+| `VOLUME NAME` | &#x2705; | &#x2705; | &#x2705;
+| `path` | &#x2705; | &#x2705; | &#x2705;
+| `subPath` | &#x2754; | &#x2754; | &#x2754;
+| `readOnly` | &#x2754; | &#x2754; | &#x2754;
+| `medium` | &#x2754; | &#x274c; | &#x274c;
+| `sizeLimit` | &#x2754; | &#x274c; | &#x274c;
+| `pathOnHost` | &#x274c; | &#x2705; | &#x274c;
+| `type` | &#x274c; | &#x2754; | &#x274c;
+| `pathOnNFS` | &#x274c; | &#x274c; | &#x2705;
+| `server` | &#x274c; | &#x274c; | &#x2705;
+
+<br>
+
+***Note:*** The `VOLUME NAME` will be extracted from the CLI command.
+
+<br>
+
+### Configuration Items Created and Entries Made
+
+The configuration items and entries in the tables below will be made in their respective areas.
+
+A `Volume` and a `Volume Mount` will be created for each `volume name` which you specify.
+
+| Name | Description | Policy |
+|---|---|---|
+| `VOLUME NAME` | The `name` of the `Volume`. | Entries are made in the Pod Spec's `Volumes`, and the `Heron container`'s `volumeMounts`.
+| `path` | The `mountPath` of the `Volume`. | Entries are made in the `Heron container`'s `volumeMounts`.
+| `subPath` | The `subPath` of the `Volume`. | Entries are made in the `Heron container`'s `volumeMounts`.
+| `readOnly` | A boolean value which defaults to `false` and indicates whether the medium has read-write permissions. | Entries are made in the `Heron container`s `volumeMount`. When used with an `NFS` volume an entry is also made in the associated `Volume`.
+| `medium` | The type of storage medium that will back the `Empty Dir` and defaults to "", please read more [here](https://kubernetes.io/docs/concepts/storage/volumes#emptydir). | An entry is made in the `Empty Dir`'s `Volume`.
+| `sizeLimit` | Total [amount](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) of local storage required for this `Empty Dir` Volume. | An entry is made in the `Empty Dir`'s `Volume`.
+| `pathOnHost` | The directory path to be mounted on the host. | A `path` entry is made in the `Host Path`'s `Volume`.
+| `type` | The type of the `Host Path` volume and defaults to "", please read more [here](https://kubernetes.io/docs/concepts/storage/volumes#hostpath). | An entry is made in the `Host Path`'s `Volume`.
+| `pathOnNFS` | The directory path to be mounted on the NFS server. | A `path` entry is made in the `NFS`'s `Volume`.
+| `server` | The hostname or IP address of the NFS server. | An entry is made in the `NFS`'s `Volume`.
+
+<br>
+
+---
+
+<br>
+
+## Setting Limits and Requests via the Command Line Interface
+
+> This section demonstrates how you can configure a topology's `Executor` and/or `Manager` (hereinafter referred to as `Heron containers`) resource `Requests` and `Limits` through CLI commands.
+
+<br/>
+
+You may configure an individual topology's `Heron container`'s resource `Requests` and `Limits` during submission through CLI commands. The default behaviour is to acquire values for resources from Configurations and for them to be common between the `Executor`s and the `Manager` for a topology.
+
+<br>
+
+### Usage
+
+The command pattern is as follows:
+`heron.kubernetes.[executor | manager].[limits | requests].[OPTION]=[VALUE]`
+
+The currently supported CLI `options` and their associated `values` are:
+
+* `cpu`: A natural number indicating the number of [CPU units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu).
+* `memory`: A natural number indicating the amount of [memory units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory).
+
+<br>
+
+#### Example
+
+An example submission command is as follows.
+
+***Limits and Requests:***
+
+```bash
+~/bin/heron submit kubernetes ~/.heron/examples/heron-api-examples.jar \
+org.apache.heron.examples.api.AckingTopology acking \
+--config-property heron.kubernetes.manager.limits.cpu=2 \
+--config-property heron.kubernetes.manager.limits.memory=3 \
+--config-property heron.kubernetes.manager.requests.cpu=1 \
+--config-property heron.kubernetes.manager.requests.memory=2
+```
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/topology-development-topology-api-java.md b/website2/website/versioned_docs/version-0.20.5-incubating/topology-development-topology-api-java.md
new file mode 100644
index 00000000000..78eb0b9a47b
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/topology-development-topology-api-java.md
@@ -0,0 +1,441 @@
+---
+id: version-0.20.5-incubating-topology-development-topology-api-java
+title: The Heron Topology API for Java
+sidebar_label: The Heron Topology API for Java
+original_id: topology-development-topology-api-java
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+A topology specifies components like spouts and bolts, as well as the relation
+between components and proper configurations. The
+[`heron-api`](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.apache.heron%22%20AND%20a%3A%22heron-api%22)
+enables you to create topology logic in Java.
+
+> If you're interested in creating stateful topologies with [effectively-once
+> semantics](heron-delivery-semantics) in Java, see [this new
+> guide](guides-effectively-once-java-topologies).
+
+## Getting started
+
+In order to use the Heron API for Java, you'll need to install the `heron-api` library, which is available
+via [Maven Central](http://search.maven.org/).
+
+### Maven setup
+
+To install the `heron-api` library using Maven, add this to the `dependencies` block of your `pom.xml`
+configuration file:
+
+```xml
+<dependency>
+    <groupId>org.apache.heron</groupId>
+    <artifactId>heron-api</artifactId>
+    <version>{{heron:version}}</version>
+</dependency>
+```
+
+#### Compiling a JAR with dependencies
+
+In order to run a Java topology in a Heron cluster, you'll need to package your topology as a "fat" JAR with dependencies included. You can use the [Maven Assembly Plugin](https://maven.apache.org/plugins/maven-assembly-plugin/usage.html) to generate JARs with dependencies. To install the plugin and add a Maven goal for a single JAR, add this to the `plugins` block in your `pom.xml`:
+
+```xml
+<plugin>
+    <artifactId>maven-assembly-plugin</artifactId>
+    <configuration>
+        <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+        </descriptorRefs>
+        <archive>
+            <manifest>
+                <mainClass></mainClass>
+            </manifest>
+        </archive>
+    </configuration>
+    <executions>
+        <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+                <goal>single</goal>
+            </goals>
+        </execution>
+    </executions>
+</plugin>
+```
+
+Once your `pom.xml` is properly set up, you can compile the JAR with dependencies using this command:
+
+```bash
+$ mvn package
+```
+
+By default, this will add a JAR in your project's `target` folder with the name `PROJECT-NAME-VERSION-jar-with-dependencies.jar`. Here's an example topology submission command using a compiled JAR:
+
+```bash
+$ mvn package
+$ heron submit local \
+  target/my-project-1.2.3-jar-with-dependencies.jar \
+  com.example.Main \
+  MyTopology arg1 arg2
+```
+
+### Writing your topology logic
+
+Heron [topologies](heron-topology-concepts) are processing graphs consisting
+of spouts that ingest data and bolts that process that data.
+
+> **Don't want to manually create spouts and bolts? Try the Heron Streamlet API.**  If you find manually creating and connecting spouts and bolts to be overly cumbersome, we recommend trying out the [Heron Streamlet API](topology-development-streamlet-api-java) for Java, which enables you to create your topology logic using a highly streamlined logic inspired by functional programming concepts.
+
+Once you've defined the spouts and bolts, a topology can be composed using a
+[`TopologyBuilder`](/api/org/apache/heron/api/topology/TopologyBuilder.html). The
+`TopologyBuilder` has two major methods used to specify topology components:
+
+Method | Description
+:------|:-----------
+`setBolt(String id, IRichBolt bolt, Number parallelismHint)` | `id` is the unique identifier assigned to the bolt, `bolt` is the bolt previously composed, and `parallelismHint` is a number specifying the number of instances of this bolt.
+`setSpout(String id, IRichSpout spout, Number parallelismHint)` | `id` is the unique identifier assigned to the spout, `spout` is the spout previously composed, and `parallelismHint` is a number specifying the number of instances of this spout.
+
+Here's a simple example:
+
+```java
+
+TopologyBuilder builder = new TopologyBuilder();
+builder.setSpout("word", new TestWordSpout(), 5);
+builder.setBolt("exclaim", new ExclamationBolt(), 4);
+```
+
+In addition to the component specification, you also need to specify how tuples
+will be routed between your topology components. There are a few different grouping
+strategies available:
+
+Grouping strategy | Description
+:-----------------|:-----------
+Fields grouping | Tuples are transmitted to bolts based on a given field. Tuples with the same field will always go to the same bolt.
+Global grouping | All tuples are transmitted to a single instance of a bolt with the lowest task id.
+Shuffle Grouping | Tuples are randomly transmitted to different instances of a bolt.
+None grouping | Currently, this is the same as shuffle grouping.
+All grouping | All tuples are transmitted to all instances of a bolt.
+Custom grouping | User-defined grouping strategy.
+
+The following snippet is a simple example of specifying shuffle grouping
+between a `word` spout and an `exclaim` bolt.
+
+```java
+
+builder.setBolt("exclaim", new ExclamationBolt(), 4)
+  .shuffleGrouping("word");
+```
+
+Once the components and the grouping are specified, the topology can be built.
+
+```java
+HeronTopology topology = builder.createTopology();
+```
+
+See the [`ExclamationTopology`](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/ExclamationTopology.java) for the complete example. More examples can be found in the  [`examples package`](https://github.com/apache/incubator-heron/tree/master/examples/src/java/org/apache/heron/examples).
+
+## Spouts
+
+A Heron **spout** is a source of streams, responsible for emitting
+[tuples](guides-data-model) into the topology. A spout may, for
+example, read data from a Kestrel queue or read tweets from the Twitter API and
+emit tuples to one or more bolts.
+
+Information on building spouts can be found in [Building
+Spouts](../../developers/java/spouts).
+
+### Implementing a Spout
+
+Spouts must implement the [`ISpout`](/api/org/apache/heron/api/spout/ISpout.html) interface.
+
+```java
+public interface ISpout extends Serializable {
+  void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector);
+  void close();
+  void activate();
+  void deactivate();
+  void nextTuple();
+  void ack(Object msgId);
+  void fail(Object msgId);
+}
+```
+
+* The `open` method is called when the spout is initialized and provides the
+spout with the executing environment.
+
+* The `close` method is called when the spout is shutdown. There's no guarantee
+that this method is called due to how the instance is killed.
+
+* The `activate` method is called when the spout is asked to go back into the
+active state.
+
+* The `deactivate` method is called when the spout is asked to enter the
+deactivated state.
+
+* The `nextTuple` method is used to fetch tuples from the input source and emit
+them via the [`SpoutOutputCollector`](/api/org/apache/heron/api/spout/SpoutOutputCollector.html).
+
+* The `ack` method is called when the `Tuple` with the `msgId` emitted by this
+spout is successfully processed.
+
+* The `fail` method is called when the `Tuple` with the `msgId` emitted by this
+spout is not processed successfully.
+
+See [`TestWordSpout`](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/spout/TestWordSpout.java) for a simple spout example.
+
+Instead of implementing the [`ISpout`](/api/org/apache/heron/api/spout/ISpout.html) interface directly, you can also implement [`IRichSpout`](/api/org/apache/heron/api/spout/IRichSpout.html).
+
+
+## Bolts
+
+A Heron **bolt** consumes streams of
+[tuples](guides-data-model) emitted by spouts and performs some
+set of user-defined processing operations on those tuples, which may include
+performing complex stream transformations, performing storage operations,
+aggregating multiple streams into one, emitting tuples to other bolts within the
+topology, and much more.
+
+### Implementing a Bolt
+
+
+Bolts must implement the [`IBolt`](/api/org/apache/heron/api/bolt/IBolt.html) interface.
+
+```java
+public interface IBolt extends Serializable {
+  void prepare(Map<String, Object> heronConf, TopologyContext context, OutputCollector collector);
+  void execute(Tuple input);
+  void cleanup();
+}
+```
+
+* The `prepare` method is called when the bolt is initialized and provides the
+bolt with the executing environment.
+
+* The `execute` method is called to process a single input `Tuple`. The `Tuple`
+contains metadata about the component, stream, and task it came from, and the
+[`OutputCollector`](/api/org/apache/heron/api/bolt/OutputCollector.html) passed
+to `prepare` is used to emit new tuples, as well as to ack or fail incoming ones.
+
+* The `cleanup` method is called before the bolt is shutdown. There's no guarantee
+that this method is called due to how the instance is killed.
+
+See the [`ExclamationBolt` in `ExclamationTopology`](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/ExclamationTopology.java) for a simple bolt example.
+
+Instead of implementing the [`IBolt`](/api/org/apache/heron/api/bolt/IBolt.html) interface directly, you can also implement [`IRichBolt`](/api/org/apache/heron/api/bolt/IRichBolt.html).
+
+## Applying delivery semantics to topologies
+
+```java
+import org.apache.heron.api.Config;
+
+Config topologyConfig = new Config();
+
+topologyConfig.setTopologyReliabilityMode(Config.TopologyReliabilityMode.EFFECTIVELY_ONCE);
+```
+
+There are three delivery semantics available corresponding to the three delivery semantics that Heron provides:
+
+* `ATMOST_ONCE`
+* `ATLEAST_ONCE`
+* `EFFECTIVELY_ONCE`
+
+## Acking
+
+In distributed systems, an **ack** (short for "acknowledgment") is a message that confirms that some action has been taken. In Heron, you can create [bolts](#acking-bolts) that emit acks when some desired operation has occurred (for example data has been successfully stored in a database or a message has been successfully produced on a topic in a pub-sub messaging system). Those acks can then be received and acted upon by upstream [spouts](#ack-receiving-spouts).
+
+> You can see acking at work in a complete Heron topology in [this topology](https://github.com/apache/incubator-heron/blob/master/examples/src/java/org/apache/heron/examples/api/AckingTopology.java).
+
+Whereas acking a tuple indicates that some operation has succeeded, the opposite can be indicated when a bolt [fails](#failing) a tuple.
+
+### Acking bolts
+
+Each Heron bolt has an `OutputCollector` that can ack tuples using the `ack` method. Tuples can be acked inside the `execute` method that each bolt uses to process incoming tuples. *When* a bolt acks tuples is up to you. Tuples can be acked immediately upon receipt, after data has been saved to a database, after a message has been successfully published to a pub-sub topic, etc.
+
+Here's an example of a bolt that acks tuples when they're successfully processed:
+
+```java
+import org.apache.heron.api.bolt.BaseRichBolt;
+import org.apache.heron.api.bolt.OutputCollector;
+import org.apache.heron.api.topology.TopologyContext;
+
+public class AckingBolt extends BaseRichBolt {
+    private OutputCollector outputCollector;
+
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+        this.outputCollector = collector;
+    }
+
+    private void applyProcessingOperation(Tuple tuple) throws Exception {
+        // Some processing logic for each tuple received by the bolt
+    }
+
+    public void execute(Tuple tuple) {
+        try {
+            applyProcessingOperation(tuple);
+            outputCollector.ack(tuple);
+        } catch (Exception e) {
+            outputCollector.fail(tuple);
+        }
+    }
+}
+```
+
+In this bolt, there's an `applyProcessingOperation` function that processes each incoming tuple. One of two things can result from this function:
+
+1. The operation succeeds, in which case the bolt sends an ack. Any upstream spouts---such as a spout like the `AckReceivingSpout` below---would then receive that ack, along with the message ID that the bolt provides.
+1. The operation fails and throws an exception, in which case the tuple is failed rather than acked.
+
+### Ack-receiving spouts
+
+Heron spouts don't emit acks, but they can receive acks when downstream bolts have acked a tuple. In order to receive an ack from downstream bolts, spouts need to do two things:
+
+1. [Specify](#specifying-a-message-id) a message ID when they emit tuples using the `nextTuple` method
+1. [Implement](#specifying-ack-reception-logic) an `ack` function that specifies what will happen when an ack is received from downstream bolts
+
+### Specifying a message ID
+
+If you want a spout to receive acks from downstream bolts, the spout needs to specify a message ID every time the spout's `SpoutOutputCollector` emits a tuple to downstream bolts. Here's an example:
+
+```java
+import org.apache.heron.api.spout.BaseRichSpout;
+
+public class AckReceivingSpout extends BaseRichSpout {
+    private Object generateMessageId() {
+        // Some logic to produce a unique ID
+    }
+
+    public void nextTuple() {
+        collector.emit(new Values(someValue), generateMessageId());
+    }
+}
+```
+
+In this example, each tuple emitted by the spout includes a unique message ID. If no ID is specified, as in the example below, then the spout simply *will not receive acks*:
+
+```java
+public class NoAckReceivedSpout extends BaseRichSpout {
+    public void nextTuple() {
+        collector.emit(new Values(someValue));
+    }
+}
+```
+
+> When implementing acking logic---as well as [failing logic](#failing)---each tuple that is acked/failed **must have a unique ID**. Otherwise, the spout receiving the ack will not be able to identify *which* tuple has been acked/failed.
+
+When specifying an ID for the tuple being emitted, the ID is of type `Object`, which means that you can serialize to/deserialize from any data type that you'd like. The message ID could thus be a simple `String` or `long` or something more complex, like a hash, `Map`, or POJO.
+
+### Specifying ack reception logic
+
+In order to specify what your spout does when an ack is received, you need to implement an `ack` function in your spout. That function takes a Java `Object` containing the tuple's ID, which means that you can potentially serialize the message ID to any type you'd like.
+
+In this example, the spout simply logs the message ID:
+
+```java
+public class AckReceivingSpout extends BaseRichSpout {
+    private Object generateMessageId() {
+        // Some logic to produce a unique ID
+    }
+
+    public void nextTuple() {
+        collector.emit(new Values(someValue), generateMessageId());
+    }
+
+    public void ack(Object messageId) {
+        // This will simply print the message ID whenever an ack arrives
+        System.out.println((String) messageId);
+    }
+}
+```
+
+In this example, the spout performs a series of actions when receiving the ack:
+
+```java
+public class AckReceivingSpout extends BaseRichSpout {
+    public void nextTuple() {
+        if (someCondition) {
+            String randomHash = // Generate a random hash as a message ID
+            collector.emit(new Values(val), randomHash);
+        }
+    }
+
+    public void ack(Object messageId) {
+        saveItemToDatabase(item);
+        publishToPubSubTopic(message);
+    }
+}
+```
+
+### Failing
+
+**Failing** a tuple is essentially the opposite of acking it, i.e. it indicates that some operation has failed. Bolts can fail tuples by calling the `fail` method on the `OutputCollector` rather than `ack`. Here's an example:
+
+
+```java
+public class AckingBolt extends BaseRichBolt {
+    public void execute(Tuple tuple) {
+        try {
+            someProcessingOperation(tuple);
+            collector.ack(tuple);
+        } catch (Exception e) {
+            collector.fail(tuple);
+        }
+    }
+}
+```
+
+In this example, an exception-throwing processing operation is attempted. If it succeeds, the tuple is acked; if it fails and an exception is thrown, the tuple is failed.
+
+As with acks, spouts can be set up to handle failed tuples by implementing the `fail` method, which takes the message ID as the argument (just like the `ack` method). Here's an example:
+
+```java
+public class AckReceivingSpout extends BaseRichSpout {
+    public void nextTuple() {
+        collector.emit(new Values(someValue), someMessageId);
+    }
+
+    public void fail(Object messageId) {
+        // Process the messageId
+    }
+}
+```
+
+As with acking, spouts must include a message ID when emitting tuples or else they will not receive fail messages.
+
+### Acking, failing, and timeouts
+
+If you're setting up your spouts and bolts to include an ack/fail logic, you can specify that a tuple will automatically be failed if a timeout threshold is reached before the tuple is acked. In this example, all tuples passing through all bolts will be failed if not acked within 10 seconds:
+
+```java
+import org.apache.heron.api.Config;
+
+Config config = new Config();
+config.setMessageTimeoutSecs(10);
+```
\ No newline at end of file
diff --git a/website2/website/versioned_docs/version-0.20.5-incubating/user-manuals-tracker-rest.md b/website2/website/versioned_docs/version-0.20.5-incubating/user-manuals-tracker-rest.md
new file mode 100644
index 00000000000..276fd35b727
--- /dev/null
+++ b/website2/website/versioned_docs/version-0.20.5-incubating/user-manuals-tracker-rest.md
@@ -0,0 +1,30 @@
+---
+id: version-0.20.5-incubating-user-manuals-tracker-rest
+title: Heron Tracker REST API
+sidebar_label: Heron Tracker REST API
+original_id: user-manuals-tracker-rest
+---
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+### Heron Tracker REST API
+The API's documentation is within the application and is served
+automatically when started. Example of accessing documentation:
+```shell
+# OpenAPI will be available at http://localhost:8080/
+heron-tracker --port=8080
+```
diff --git a/website2/website/versioned_sidebars/version-0.20.5-incubating-sidebars.json b/website2/website/versioned_sidebars/version-0.20.5-incubating-sidebars.json
new file mode 100644
index 00000000000..2290840a53b
--- /dev/null
+++ b/website2/website/versioned_sidebars/version-0.20.5-incubating-sidebars.json
@@ -0,0 +1,101 @@
+{
+  "version-0.20.5-incubating-docs": {
+    "Getting Started": [
+      "version-0.20.5-incubating-getting-started-local-single-node",
+      "version-0.20.5-incubating-getting-started-migrate-storm-topologies",
+      "version-0.20.5-incubating-getting-started-docker",
+      "version-0.20.5-incubating-getting-started-troubleshooting-guide"
+    ],
+    "Deployment": [
+      "version-0.20.5-incubating-deployment-overview",
+      "version-0.20.5-incubating-deployment-configuration",
+      "version-0.20.5-incubating-deployment-api-server"
+    ],
+    "Topology Development APIs": [
+      "version-0.20.5-incubating-topology-development-streamlet-api",
+      "version-0.20.5-incubating-topology-development-eco-api",
+      "version-0.20.5-incubating-topology-development-topology-api-java",
+      "version-0.20.5-incubating-topology-development-topology-api-python",
+      "version-0.20.5-incubating-topology-development-streamlet-scala"
+    ],
+    "Client API Docs": [
+      "version-0.20.5-incubating-client-api-docs-overview"
+    ],
+    "Guides": [
+      "version-0.20.5-incubating-guides-effectively-once-java-topologies",
+      "version-0.20.5-incubating-guides-data-model",
+      "version-0.20.5-incubating-guides-tuple-serialization",
+      "version-0.20.5-incubating-guides-ui-guide",
+      "version-0.20.5-incubating-guides-topology-tuning",
+      "version-0.20.5-incubating-guides-packing-algorithms",
+      "version-0.20.5-incubating-guides-simulator-mode",
+      "version-0.20.5-incubating-guides-troubeshooting-guide"
+    ],
+    "Heron Concepts": [
+      "version-0.20.5-incubating-heron-design-goals",
+      "version-0.20.5-incubating-heron-topology-concepts",
+      "version-0.20.5-incubating-heron-streamlet-concepts",
+      "version-0.20.5-incubating-heron-architecture",
+      "version-0.20.5-incubating-heron-delivery-semantics"
+    ],
+    "State Managers": [
+      "version-0.20.5-incubating-state-managers-zookeeper",
+      "version-0.20.5-incubating-state-managers-local-fs"
+    ],
+    "Uploaders": [
+      "version-0.20.5-incubating-uploaders-local-fs",
+      "version-0.20.5-incubating-uploaders-hdfs",
+      "version-0.20.5-incubating-uploaders-http",
+      "version-0.20.5-incubating-uploaders-amazon-s3",
+      "version-0.20.5-incubating-uploaders-scp"
+    ],
+    "Schedulers": [
+      "version-0.20.5-incubating-schedulers-k8s-by-hand",
+      "version-0.20.5-incubating-schedulers-k8s-with-helm",
+      "version-0.20.5-incubating-schedulers-k8s-execution-environment",
+      "version-0.20.5-incubating-schedulers-aurora-cluster",
+      "version-0.20.5-incubating-schedulers-aurora-local",
+      "version-0.20.5-incubating-schedulers-local",
+      "version-0.20.5-incubating-schedulers-nomad",
+      "version-0.20.5-incubating-schedulers-mesos-local-mac",
+      "version-0.20.5-incubating-schedulers-slurm",
+      "version-0.20.5-incubating-schedulers-yarn"
+    ],
+    "Cluster Configuration": [
+      "version-0.20.5-incubating-cluster-config-overview",
+      "version-0.20.5-incubating-cluster-config-system-level",
+      "version-0.20.5-incubating-cluster-config-instance",
+      "version-0.20.5-incubating-cluster-config-metrics",
+      "version-0.20.5-incubating-cluster-config-stream",
+      "version-0.20.5-incubating-cluster-config-tmanager"
+    ],
+    "Observability": [
+      "version-0.20.5-incubating-observability-prometheus",
+      "version-0.20.5-incubating-observability-graphite",
+      "version-0.20.5-incubating-observability-scribe"
+    ],
+    "User Manuals": [
+      "version-0.20.5-incubating-user-manuals-heron-cli",
+      "version-0.20.5-incubating-user-manuals-heron-explorer",
+      "version-0.20.5-incubating-user-manuals-tracker-rest",
+      "version-0.20.5-incubating-user-manuals-heron-tracker-runbook",
+      "version-0.20.5-incubating-user-manuals-heron-ui-runbook",
+      "version-0.20.5-incubating-user-manuals-heron-shell"
+    ],
+    "Compiling": [
+      "version-0.20.5-incubating-compiling-overview",
+      "version-0.20.5-incubating-compiling-linux",
+      "version-0.20.5-incubating-compiling-osx",
+      "version-0.20.5-incubating-compiling-docker",
+      "version-0.20.5-incubating-compiling-running-tests",
+      "version-0.20.5-incubating-compiling-code-organization"
+    ],
+    "Extending Heron": [
+      "version-0.20.5-incubating-extending-heron-scheduler",
+      "version-0.20.5-incubating-extending-heron-metric-sink"
+    ],
+    "Heron Resources": [
+      "version-0.20.5-incubating-heron-resources-resources"
+    ]
+  }
+}
diff --git a/website2/website/versions.json b/website2/website/versions.json
index 99b11450d93..4b701bcf085 100644
--- a/website2/website/versions.json
+++ b/website2/website/versions.json
@@ -1,4 +1,5 @@
 [
+  "0.20.5-incubating",
   "0.20.4-incubating",
   "0.20.3-incubating",
   "0.20.2-incubating",