Posted to reviews@yunikorn.apache.org by wi...@apache.org on 2020/09/08 05:18:00 UTC

[incubator-yunikorn-site] 02/03: [YUNIKORN-392] Add v0.9.0 documentation (#21)

This is an automated email from the ASF dual-hosted git repository.

wilfreds pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-yunikorn-site.git

commit fa1ce3889bb489f5cf1d4f7f78ad459b67a55566
Author: Wilfred Spiegelenburg <wi...@apache.org>
AuthorDate: Tue Sep 8 14:42:39 2020 +1000

    [YUNIKORN-392] Add v0.9.0 documentation (#21)
    
    Closes: #21
---
 {static/img => docs/assets}/application-state.png  | Bin
 {static/img => docs/assets}/cpu_profile.jpg        | Bin
 {static/img => docs/assets}/dashboard_secret.png   | Bin
 .../img => docs/assets}/dashboard_token_select.png | Bin
 .../img => docs/assets}/docker-dektop-minikube.png | Bin
 {static/img => docs/assets}/docker-desktop.png     | Bin
 {static/img => docs/assets}/fifo-state-example.png | Bin
 {static/img => docs/assets}/goland_debug.jpg       | Bin
 docs/assets/goland_ide_pref_imports.png            | Bin 0 -> 71562 bytes
 docs/assets/goland_ide_pref_inspections.png        | Bin 0 -> 115226 bytes
 docs/assets/goland_ide_pref_other.png              | Bin 0 -> 51200 bytes
 docs/assets/goland_ide_pref_tabs.png               | Bin 0 -> 52260 bytes
 {static/img => docs/assets}/node-bin-packing.png   | Bin
 {static/img => docs/assets}/node-fair.png          | Bin
 {static/img => docs/assets}/object-state.png       | Bin
 {static/img => docs/assets}/pluggable-app-mgmt.jpg | Bin
 {static/img => docs/assets}/queue-fairness.png     | Bin
 .../assets}/resilience-node-recovery.jpg           | Bin
 .../img => docs/assets}/resilience-workflow.jpg    | Bin
 {static/img => docs/assets}/spark-jobs-on-ui.png   | Bin
 {static/img => docs/assets}/spark-pods.png         | Bin
 {static/img => docs/assets}/throughput.png         | Bin
 {static/img => docs/assets}/yk-ui-screenshots.gif  | Bin
 docs/design/pluggable_app_management.md            |   2 +-
 docs/design/resilience.md                          |   4 +-
 docs/design/scheduler_configuration.md             |   2 +-
 docs/design/scheduler_object_states.md             |   4 +-
 docs/design/state_aware_scheduling.md              |   2 +-
 docs/developer_guide/env_setup.md                  |  10 +-
 docs/get_started/get_started.md                    |   2 +-
 .../evaluate_perf_function_with_kubemark.md        |   8 +-
 docs/performance/profiling.md                      |   2 +-
 docs/user_guide/acls.md                            |   3 +-
 docs/user_guide/placement_rules.md                 |   2 +-
 docs/user_guide/queue_config.md                    |   2 +-
 docs/user_guide/workloads/run_spark.md             |   4 +-
 src/pages/index.js                                 |  13 +-
 static/img/logo/yunikorn-log-gray.png              | Bin 162302 -> 0 bytes
 .../yunikorn-logo-gray.png}                        | Bin
 static/img/{ => logo}/yunikorn_black_256.png       | Bin
 static/img/{ => logo}/yunikorn_black_white_256.png | Bin
 static/img/{ => logo}/yunikorn_blue_256.png        | Bin
 static/img/{ => logo}/yunikorn_classic_logo.png    | Bin
 static/img/{ => logo}/yunikorn_logo.svg            |   0
 static/img/{ => logo}/yunikorn_white_256.png       | Bin
 static/img/{ => logo}/yunikorn_white_logo.png      | Bin
 static/img/screenshot-apps.png                     | Bin 364789 -> 0 bytes
 static/img/screenshot-queues.png                   | Bin 278745 -> 0 bytes
 static/img/ui-screenshots.gif                      | Bin 2153401 -> 0 bytes
 .../version-0.8.0/assets}/cpu_profile.jpg          | Bin
 .../version-0.8.0/assets}/dashboard_secret.png     | Bin
 .../assets}/dashboard_token_select.png             | Bin
 .../assets}/docker-dektop-minikube.png             | Bin
 .../version-0.8.0/assets}/docker-desktop.png       | Bin
 .../version-0.8.0/assets}/goland_debug.jpg         | Bin
 .../assets/goland_ide_pref_imports.png             | Bin 0 -> 71562 bytes
 .../assets/goland_ide_pref_inspections.png         | Bin 0 -> 115226 bytes
 .../version-0.8.0/assets/goland_ide_pref_other.png | Bin 0 -> 51200 bytes
 .../version-0.8.0/assets/goland_ide_pref_tabs.png  | Bin 0 -> 52260 bytes
 .../version-0.8.0/assets}/node-bin-packing.png     | Bin
 .../version-0.8.0/assets}/node-fair.png            | Bin
 .../version-0.8.0/assets}/pluggable-app-mgmt.jpg   | Bin
 .../version-0.8.0/assets}/queue-fairness.png       | Bin
 .../assets}/resilience-node-recovery.jpg           | Bin
 .../version-0.8.0/assets}/resilience-workflow.jpg  | Bin
 .../version-0.8.0/assets}/throughput.png           | Bin
 .../version-0.8.0/community/coding_guidelines.md   |   8 +-
 .../design/pluggable_app_management.md             |   2 +-
 versioned_docs/version-0.8.0/design/resilience.md  |   4 +-
 versioned_docs/version-0.8.0/download.md           |  61 ---
 .../evaluate_perf_function_with_kubemark.md        |   8 +-
 .../version-0.8.0/performance/profiling.md         |   2 +-
 versioned_docs/version-0.8.0/roadmap.md            |  85 ----
 versioned_docs/version-0.8.0/setup/env_setup.md    |  10 +-
 versioned_docs/version-0.9.0/api/cluster.md        |  62 +++
 versioned_docs/version-0.9.0/api/scheduler.md      | 517 +++++++++++++++++++++
 versioned_docs/version-0.9.0/api/system.md         | 225 +++++++++
 .../version-0.9.0/assets}/application-state.png    | Bin
 .../version-0.9.0/assets}/architecture.png         | Bin
 .../version-0.9.0/assets}/cpu_profile.jpg          | Bin
 .../version-0.9.0/assets}/dashboard_secret.png     | Bin
 .../assets}/dashboard_token_select.png             | Bin
 .../assets}/docker-dektop-minikube.png             | Bin
 .../version-0.9.0/assets}/docker-desktop.png       | Bin
 .../version-0.9.0/assets}/fifo-state-example.png   | Bin
 .../version-0.9.0/assets}/goland_debug.jpg         | Bin
 .../version-0.9.0/assets}/node-bin-packing.png     | Bin
 .../version-0.9.0/assets}/node-fair.png            | Bin
 .../version-0.9.0/assets}/object-state.png         | Bin
 .../version-0.9.0/assets}/pluggable-app-mgmt.jpg   | Bin
 .../version-0.9.0/assets}/queue-fairness.png       | Bin
 .../assets}/resilience-node-recovery.jpg           | Bin
 .../version-0.9.0/assets}/resilience-workflow.jpg  | Bin
 .../version-0.9.0/assets}/spark-jobs-on-ui.png     | Bin
 .../version-0.9.0/assets}/spark-pods.png           | Bin
 .../version-0.9.0/assets}/throughput.png           | Bin
 .../version-0.9.0/assets}/yk-ui-screenshots.gif    | Bin
 .../version-0.9.0/design/architecture.md           |  62 +++
 .../version-0.9.0/design/cross_queue_preemption.md | 126 +++++
 versioned_docs/version-0.9.0/design/k8shim.md      |  83 ++++
 .../design/namespace_resource_quota.md             | 183 ++++++++
 .../design/pluggable_app_management.md             |   2 +-
 versioned_docs/version-0.9.0/design/predicates.md  |  80 ++++
 .../version-0.9.0}/design/resilience.md            |   4 +-
 .../design/scheduler_configuration.md              |   2 +-
 .../version-0.9.0/design/scheduler_core_design.md  | 395 ++++++++++++++++
 .../design/scheduler_object_states.md              |   4 +-
 .../design/state_aware_scheduling.md               |   2 +-
 .../version-0.9.0/developer_guide/build.md         | 166 +++++++
 .../version-0.9.0/developer_guide/deployment.md    | 122 +++++
 .../version-0.9.0}/developer_guide/env_setup.md    |  22 +-
 .../version-0.9.0/get_started/core_features.md     |  73 +++
 .../version-0.9.0}/get_started/get_started.md      |   6 +-
 .../evaluate_perf_function_with_kubemark.md        |   8 +-
 .../version-0.9.0/performance/metrics.md           |  72 +++
 .../version-0.9.0}/performance/profiling.md        |   2 +-
 .../version-0.9.0}/user_guide/acls.md              |   3 +-
 .../version-0.9.0}/user_guide/placement_rules.md   |   2 +-
 .../version-0.9.0}/user_guide/queue_config.md      |   2 +-
 .../user_guide/resource_quota_mgmt.md              | 152 ++++++
 .../version-0.9.0/user_guide/sorting_policies.md   | 154 ++++++
 .../version-0.9.0/user_guide/trouble_shooting.md   | 153 ++++++
 .../user_guide/workloads/run_flink.md              |  66 +++
 .../user_guide/workloads/run_spark.md              |   4 +-
 .../user_guide/workloads/run_tensorflow.md         |  40 ++
 versioned_sidebars/version-0.9.0-sidebars.json     | 177 +++++++
 versions.json                                      |   1 +
 127 files changed, 2988 insertions(+), 224 deletions(-)

diff --git a/static/img/application-state.png b/docs/assets/application-state.png
similarity index 100%
copy from static/img/application-state.png
copy to docs/assets/application-state.png
diff --git a/static/img/cpu_profile.jpg b/docs/assets/cpu_profile.jpg
similarity index 100%
copy from static/img/cpu_profile.jpg
copy to docs/assets/cpu_profile.jpg
diff --git a/static/img/dashboard_secret.png b/docs/assets/dashboard_secret.png
similarity index 100%
copy from static/img/dashboard_secret.png
copy to docs/assets/dashboard_secret.png
diff --git a/static/img/dashboard_token_select.png b/docs/assets/dashboard_token_select.png
similarity index 100%
copy from static/img/dashboard_token_select.png
copy to docs/assets/dashboard_token_select.png
diff --git a/static/img/docker-dektop-minikube.png b/docs/assets/docker-dektop-minikube.png
similarity index 100%
copy from static/img/docker-dektop-minikube.png
copy to docs/assets/docker-dektop-minikube.png
diff --git a/static/img/docker-desktop.png b/docs/assets/docker-desktop.png
similarity index 100%
copy from static/img/docker-desktop.png
copy to docs/assets/docker-desktop.png
diff --git a/static/img/fifo-state-example.png b/docs/assets/fifo-state-example.png
similarity index 100%
copy from static/img/fifo-state-example.png
copy to docs/assets/fifo-state-example.png
diff --git a/static/img/goland_debug.jpg b/docs/assets/goland_debug.jpg
similarity index 100%
copy from static/img/goland_debug.jpg
copy to docs/assets/goland_debug.jpg
diff --git a/docs/assets/goland_ide_pref_imports.png b/docs/assets/goland_ide_pref_imports.png
new file mode 100644
index 0000000..fbd9b00
Binary files /dev/null and b/docs/assets/goland_ide_pref_imports.png differ
diff --git a/docs/assets/goland_ide_pref_inspections.png b/docs/assets/goland_ide_pref_inspections.png
new file mode 100644
index 0000000..395e640
Binary files /dev/null and b/docs/assets/goland_ide_pref_inspections.png differ
diff --git a/docs/assets/goland_ide_pref_other.png b/docs/assets/goland_ide_pref_other.png
new file mode 100644
index 0000000..77e9908
Binary files /dev/null and b/docs/assets/goland_ide_pref_other.png differ
diff --git a/docs/assets/goland_ide_pref_tabs.png b/docs/assets/goland_ide_pref_tabs.png
new file mode 100644
index 0000000..f6b741a
Binary files /dev/null and b/docs/assets/goland_ide_pref_tabs.png differ
diff --git a/static/img/node-bin-packing.png b/docs/assets/node-bin-packing.png
similarity index 100%
copy from static/img/node-bin-packing.png
copy to docs/assets/node-bin-packing.png
diff --git a/static/img/node-fair.png b/docs/assets/node-fair.png
similarity index 100%
copy from static/img/node-fair.png
copy to docs/assets/node-fair.png
diff --git a/static/img/object-state.png b/docs/assets/object-state.png
similarity index 100%
copy from static/img/object-state.png
copy to docs/assets/object-state.png
diff --git a/static/img/pluggable-app-mgmt.jpg b/docs/assets/pluggable-app-mgmt.jpg
similarity index 100%
copy from static/img/pluggable-app-mgmt.jpg
copy to docs/assets/pluggable-app-mgmt.jpg
diff --git a/static/img/queue-fairness.png b/docs/assets/queue-fairness.png
similarity index 100%
copy from static/img/queue-fairness.png
copy to docs/assets/queue-fairness.png
diff --git a/static/img/resilience-node-recovery.jpg b/docs/assets/resilience-node-recovery.jpg
similarity index 100%
copy from static/img/resilience-node-recovery.jpg
copy to docs/assets/resilience-node-recovery.jpg
diff --git a/static/img/resilience-workflow.jpg b/docs/assets/resilience-workflow.jpg
similarity index 100%
copy from static/img/resilience-workflow.jpg
copy to docs/assets/resilience-workflow.jpg
diff --git a/static/img/spark-jobs-on-ui.png b/docs/assets/spark-jobs-on-ui.png
similarity index 100%
copy from static/img/spark-jobs-on-ui.png
copy to docs/assets/spark-jobs-on-ui.png
diff --git a/static/img/spark-pods.png b/docs/assets/spark-pods.png
similarity index 100%
copy from static/img/spark-pods.png
copy to docs/assets/spark-pods.png
diff --git a/static/img/throughput.png b/docs/assets/throughput.png
similarity index 100%
copy from static/img/throughput.png
copy to docs/assets/throughput.png
diff --git a/static/img/yk-ui-screenshots.gif b/docs/assets/yk-ui-screenshots.gif
similarity index 100%
copy from static/img/yk-ui-screenshots.gif
copy to docs/assets/yk-ui-screenshots.gif
diff --git a/docs/design/pluggable_app_management.md b/docs/design/pluggable_app_management.md
index 681e7bb..d297ada 100644
--- a/docs/design/pluggable_app_management.md
+++ b/docs/design/pluggable_app_management.md
@@ -58,7 +58,7 @@ The key issue here is we need a app-management interface, that can be easily ext
 It needs to be decoupled with existing scheduling logic. For each operator, we create a service to manage this type app's lifecycle,
 and communicate with the scheduling cache independently. The high-level design looks like below:
 
-![Pluggable App Management](/img/pluggable-app-mgmt.jpg)
+![Pluggable App Management](./../assets/pluggable-app-mgmt.jpg)
 
 Where
 - `AppManagementService` is a composite set of services that can be managed together.
diff --git a/docs/design/resilience.md b/docs/design/resilience.md
index 46aac13..c68a708 100644
--- a/docs/design/resilience.md
+++ b/docs/design/resilience.md
@@ -60,7 +60,7 @@ New -----------> Registered -----------> Recovering ----------> Running
 
 Following chart illustrate how yunikorn-core and shim works together on recovery.
 
-![Workflow](/img/resilience-workflow.jpg)
+![Workflow](./../assets/resilience-workflow.jpg)
 
 Restart (with recovery) process
 - yunikorn-shim registers itself with yunikorn-core
@@ -89,7 +89,7 @@ In the shim layer, it maintains states for each node and pods running on this no
 all nodes initially are considered as under `recovering`. Only when all pods running on this node are fully recovered,
 the node can be considered as `recovered`.
 
-![node-recovery](/img/resilience-node-recovery.jpg)
+![node-recovery](./../assets/resilience-node-recovery.jpg)
 
 Like demonstrated on upon diagram,
 
diff --git a/docs/design/scheduler_configuration.md b/docs/design/scheduler_configuration.md
index 5010da6..4f2c7cf 100644
--- a/docs/design/scheduler_configuration.md
+++ b/docs/design/scheduler_configuration.md
@@ -30,7 +30,7 @@ The scheduler configuration is mainly static. There is no need to change a web s
 
 From a separation of duty we can allow an operator that manages the cluster to make changes to the scheduler queues. You would not want to allow that administrator to change the scheduler configuration itself.
 
-Separated from the core scheduler configuration we have one or more shim configurations. We currently can not anticipate the deployment model of the scheduler and its shims. A shim, like the k8s-shim, might run in the same container or node but there is no guarantee it will. We also do not know the number of shims that will be used with one core scheduler. There is also still the possibility to have multiple instances of the same shim with one core scheduler.
+Separated from the core scheduler configuration we have one or more shim configurations. We currently cannot anticipate the deployment model of the scheduler and its shims. A shim, like the k8s-shim, might run in the same container or node but there is no guarantee it will. We also do not know the number of shims that will be used with one core scheduler. There is also still the possibility to have multiple instances of the same shim with one core scheduler.
 
 Shim configuration must be independent of the core scheduler configuration.
 ## Scheduler Configuration
diff --git a/docs/design/scheduler_object_states.md b/docs/design/scheduler_object_states.md
index d761658..c0a9db3 100644
--- a/docs/design/scheduler_object_states.md
+++ b/docs/design/scheduler_object_states.md
@@ -69,7 +69,7 @@ The events that can trigger a state change:
 * Kill: kill an application (source: resource manager)
 
 Here is a diagram that shows the states with the event that causes the state to change:  
-![application state diagram](/img/application-state.png)
+![application state diagram](./../assets/application-state.png)
 
 ### Object State
 <!-- fix the draining to stopped transition -->
@@ -96,7 +96,7 @@ The events that can trigger a state change:
 * Remove: mark an object for removal (source: core scheduler)
 
 Here is a diagram that shows the states with the event that causes the state to change:  
-![object state diagram](/img/object-state.png)
+![object state diagram](./../assets/object-state.png)
 
 ### Node
 <!-- should start using object state -->
diff --git a/docs/design/state_aware_scheduling.md b/docs/design/state_aware_scheduling.md
index 7cc35a1..f92f93c 100644
--- a/docs/design/state_aware_scheduling.md
+++ b/docs/design/state_aware_scheduling.md
@@ -97,7 +97,7 @@ Weighing those against each other the proposal is to not make this configurable.
 ### Example run
 Using Spark applications as an example: a new application can only be scheduled if the previous application has at least one (1) executor allocated.
 
-![images](/img/fifo-state-example.png)
+![images](./../assets/fifo-state-example.png)
 
 Assume we have the following Spark apps: App1 & App2 as in the diagram above. The applications were submitted in that order: App1 first, then App2. They were both submitted to the same queue.
 
diff --git a/docs/developer_guide/env_setup.md b/docs/developer_guide/env_setup.md
index 4d0f1b8..bfe70f5 100644
--- a/docs/developer_guide/env_setup.md
+++ b/docs/developer_guide/env_setup.md
@@ -37,7 +37,7 @@ Just simply follow the instruction [here](https://docs.docker.com/docker-for-mac
 
 Once Kubernetes is started in docker desktop, you should see something similar below:
 
-![Kubernetes in Docker Desktop](/img/docker-desktop.png)
+![Kubernetes in Docker Desktop](./../assets/docker-desktop.png)
 
 This means that:
 1. Kubernetes is running.
@@ -68,9 +68,9 @@ The dashboard as deployed in the previous step requires a token or config to sig
     ```
 3. copy the token value which is part of the `Data` section with the tag `token`.
 4. select the **Token** option in the dashboard web UI:<br/>
-    ![Token Access in dashboard](/img/dashboard_token_select.png)
+    ![Token Access in dashboard](./../assets/dashboard_token_select.png)
 5. paste the token value into the input box and sign in:<br/>
-    ![Token Access in dashboard](/img/dashboard_secret.png)
+    ![Token Access in dashboard](./../assets/dashboard_secret.png)
 
 ## Local Kubernetes cluster with Minikube
 Minikube can be added to an existing Docker Desktop install. Minikube can either use the pre-installed hypervisor or use a hypervisor of choice. These instructions use [HyperKit](https://github.com/moby/hyperkit) which is embedded in Docker Desktop.   
@@ -94,7 +94,7 @@ Check hypervisor
 Docker Desktop should have already installed HyperKit. In a
     ```
 1. update the minikube config to default to the HyperKit install
 `minikube config set vm-driver hyperkit`
 1. change docker desktop to use minikube for Kubernetes:<br/>
-    ![Kubernetes in Docker Desktop: minikube setting](/img/docker-dektop-minikube.png)
+    ![Kubernetes in Docker Desktop: minikube setting](./../assets/docker-dektop-minikube.png)
 
 ### Deploy and access the cluster
 After the installation is done you can start a new cluster.
@@ -116,7 +116,7 @@ Note, this instruction requires you have GoLand IDE for development.
 In GoLand, go to yunikorn-k8shim project. Then click "Run" -> "Debug..." -> "Edit Configuration..." to get the pop-up configuration window.
 Note, you need to click "+" to create a new profile if the `Go Build` option is not available at the first time.
 
-![Debug Configuration](/img/goland_debug.jpg)
+![Debug Configuration](./../assets/goland_debug.jpg)
 
 The highlighted fields are the configurations you need to add. These include:
 
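[Editor's note] As a quick reference for the minikube steps quoted in the hunks above, the driver configuration and a basic start/verify sequence look roughly like the following. This is a minimal sketch: only `minikube config set vm-driver hyperkit` comes from the doc itself, the rest are standard minikube/kubectl commands shown for context.

```shell
# From the doc hunk above: default minikube to the HyperKit driver bundled with Docker Desktop.
minikube config set vm-driver hyperkit

# Start a local cluster with the configured driver, then confirm the node registered.
minikube start
kubectl get nodes
```
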
diff --git a/docs/get_started/get_started.md b/docs/get_started/get_started.md
index cfb72a5..6fd26c0 100644
--- a/docs/get_started/get_started.md
+++ b/docs/get_started/get_started.md
@@ -65,7 +65,7 @@ kubectl port-forward svc/yunikorn-service 9080:9080 -n yunikorn
 `9889` is the default port for Web UI, `9080` is the default port of scheduler's Restful service where web UI retrieves info from.
 Once this is done, web UI will be available at: http://localhost:9889.
 
-![UI Screenshots](/img/yk-ui-screenshots.gif)
+![UI Screenshots](./../assets/yk-ui-screenshots.gif)
 
 YuniKorn UI provides a centralised view for cluster resource capacity, utilization, and all application info.
 
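[Editor's note] The hunk above mentions port `9080` as the scheduler's RESTful service and `9889` for the web UI. A minimal sketch of exercising that flow, using the service and namespace names from the hunk header and the `/ws/v1/clusters` endpoint documented in the v0.9.0 API pages added later in this commit:

```shell
# Forward the scheduler's REST port locally (service/namespace as referenced in the hunk above).
kubectl port-forward svc/yunikorn-service 9080:9080 -n yunikorn &

# Query cluster-level information from the REST API documented in the new v0.9.0 API pages.
curl -s http://localhost:9080/ws/v1/clusters
```
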
diff --git a/docs/performance/evaluate_perf_function_with_kubemark.md b/docs/performance/evaluate_perf_function_with_kubemark.md
index d3e7c6f..066596a 100644
--- a/docs/performance/evaluate_perf_function_with_kubemark.md
+++ b/docs/performance/evaluate_perf_function_with_kubemark.md
@@ -36,7 +36,7 @@ In YuniKorn, we have done lots of optimizations to improve the performance, such
 and low-latency sorting policies. The following chart reveals the scheduler throughput (by using Kubemark simulated
 environment, and submitting 50,000 pods), comparing to the K8s default scheduler.
 
-![Scheduler Throughput](/img/throughput.png)
+![Scheduler Throughput](./../assets/throughput.png)
 
 The charts record the time spent until all pods are running on the cluster
 
@@ -52,7 +52,7 @@ Each of YuniKorn queues has its guaranteed and maximum capacity. When we have lo
 YuniKorn ensures each of them gets its fair share. When we monitor the resource usage of these queues, we can clearly
 see how fairness was enforced:
 
-![Scheduler Throughput](/img/queue-fairness.png)
+![Scheduler Throughput](./../assets/queue-fairness.png)
 
 We set up 4 heterogeneous queues on this cluster, and submit different workloads against these queues.
 From the chart, we can see the queue resources are increasing nearly in the same trend, which means the resource
@@ -83,7 +83,7 @@ This means at the given time, this cluster has 100 nodes whose utilization is in
 it has 300 nodes whose utilization is in the range 10% - 20%, and so on… Now, we run lots of workloads and
 collect metrics, see the below chart:
 
-<img src="/img/node-fair.png" />
+<img src="./../assets/node-fair.png" />
 
 We can see all nodes have 0% utilization, and then all of them move to bucket-1, then bucket-2 … and eventually
 all nodes moved to bucket-9, which means all capacity is used. In another word, nodes’ resource has been used in
@@ -93,7 +93,7 @@ a fairness manner.
 
 This is When the bin-packing policy is enabled, we can see the following pattern:
 
-<img src="/img/node-bin-packing.png" />
+<img src="./../assets/node-bin-packing.png" />
 
 On the contrary, all nodes are moving between 2 buckets, bucket-0 and bucket-9. Nodes in bucket-0 (0% - 10%)
 are decreasing in a linear manner, and nodes in bucket-9 (90% - 100%) are increasing with the same curve.
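[Editor's note] The "bucket" view discussed in this hunk is simply a histogram of per-node utilization in 10% steps. A rough sketch of computing it from a list of utilization percentages; the input file name and one-value-per-line format are assumptions, not part of the doc:

```shell
# node_util.txt is assumed to hold one utilization percentage (0-100) per node, one per line.
awk '{ b = int($1 / 10); if (b > 9) b = 9; count[b]++ }
     END { for (i = 0; i <= 9; i++)
             printf "bucket-%d (%d%%-%d%%): %d nodes\n", i, i*10, (i+1)*10, count[i] }' node_util.txt
```
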
diff --git a/docs/performance/profiling.md b/docs/performance/profiling.md
index dbbff84..4050ccd 100644
--- a/docs/performance/profiling.md
+++ b/docs/performance/profiling.md
@@ -60,7 +60,7 @@ you can type command such as `web` or `gif` to get a graph that helps you better
 understand the overall performance on critical code paths. You can get something
 like below:
 
-![CPU Profiling](/img/cpu_profile.jpg)
+![CPU Profiling](./../assets/cpu_profile.jpg)
 
 Note, in order to use these
 options, you need to install the virtualization tool `graphviz` first, if you are using Mac, simply run `brew install graphviz`, for more info please refer [here](https://graphviz.gitlab.io/).
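[Editor's note] A rough illustration of the pprof flow described in this hunk. The profile URL below is an assumption based on the standard Go `net/http/pprof` path, not taken from the docs; adjust host, port and path to your deployment.

```shell
# Install graphviz so pprof can render the 'web' / 'gif' graphs (macOS example from the doc).
brew install graphviz

# Capture a 30-second CPU profile and open the interactive pprof shell (endpoint assumed).
go tool pprof "http://localhost:9080/debug/pprof/profile?seconds=30"
```
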
diff --git a/docs/user_guide/acls.md b/docs/user_guide/acls.md
index 19887a8..8f41a80 100644
--- a/docs/user_guide/acls.md
+++ b/docs/user_guide/acls.md
@@ -23,7 +23,8 @@ under the License.
 -->
 
 :::caution
-Warning! This feature has not been fully implemented. Please use the wildcard ACL for now. 
+User information is currently not passed to the core scheduler from the kubernetes shim.
+Therefore, the recommendation is to use the wildcard ACL on the root queue for now as per the default configuration.
 :::
 
 ## Usage
diff --git a/docs/user_guide/placement_rules.md b/docs/user_guide/placement_rules.md
index e2ed44c..5f2c64d 100644
--- a/docs/user_guide/placement_rules.md
+++ b/docs/user_guide/placement_rules.md
@@ -22,7 +22,7 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-The basics for the placement rules are described in the [scheduler design document](design/scheduler_configuration.md#placement-rules-definition).
+The basics for the placement rules are described in the [scheduler configuration design document](design/scheduler_configuration.md#placement-rules-definition).
 Multiple rules can be chained to form a placement policy.
 [Access control lists](user_guide/acls.md) and rule filters are defined per rule and enforced per rule.
 This document explains how to build a policy, including the rule usage, that is part of the scheduler with examples.
diff --git a/docs/user_guide/queue_config.md b/docs/user_guide/queue_config.md
index 007d5a0..fb3e4b8 100644
--- a/docs/user_guide/queue_config.md
+++ b/docs/user_guide/queue_config.md
@@ -22,7 +22,7 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-The basis for the queue configuration is given in the [scheduler design document](design/architecture.md#configurations--semantics).
+The basis for the queue configuration is given in the [configuration design document](design/scheduler_configuration.md).
 
 This document provides the generic queue configuration.
 It references both the [Access control lists](user_guide/acls.md) and [placement rule](user_guide/placement_rules.md) documentation.
diff --git a/docs/user_guide/workloads/run_spark.md b/docs/user_guide/workloads/run_spark.md
index 0b1655a..cee95df 100644
--- a/docs/user_guide/workloads/run_spark.md
+++ b/docs/user_guide/workloads/run_spark.md
@@ -116,12 +116,12 @@ ${SPARK_HOME}/bin/spark-submit --master k8s://http://localhost:8001 --deploy-mod
 
 You'll see Spark driver and executors been created on Kubernetes:
 
-![spark-pods](/img/spark-pods.png)
+![spark-pods](./../../assets/spark-pods.png)
 
 You can also view the job info from YuniKorn UI. If you do not know how to access the YuniKorn UI, please read the document
 [here](../../get_started/get_started.md#access-the-web-ui).
 
-![spark-jobs-on-ui](/img/spark-jobs-on-ui.png)
+![spark-jobs-on-ui](./../../assets/spark-jobs-on-ui.png)
 
 ## What happens behind the scenes?
 
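[Editor's note] The spark-submit command in the hunk header above is truncated by the archive. A hedged sketch of what such a submit looks like, using only standard Spark-on-Kubernetes options; the namespace, image, class and jar path are placeholders, not values from the doc:

```shell
# The k8s master URL matches the kubectl proxy address shown in the hunk header.
${SPARK_HOME}/bin/spark-submit \
  --master k8s://http://localhost:8001 \
  --deploy-mode cluster \
  --name spark-pi \
  --class org.apache.spark.examples.SparkPi \
  --conf spark.executor.instances=2 \
  --conf spark.kubernetes.namespace=spark-test \
  --conf spark.kubernetes.container.image=<your-spark-image> \
  local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar
```
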
diff --git a/src/pages/index.js b/src/pages/index.js
index 033810b..417baac 100644
--- a/src/pages/index.js
+++ b/src/pages/index.js
@@ -14,7 +14,6 @@
  * limitations under the License.
 */
 
-import React from 'react';
 import classnames from 'classnames';
 import Layout from '@theme/Layout';
 import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
@@ -23,7 +22,7 @@ import styles from './styles.module.css';
 const features = [
   {
     title: <>Scheduling Capabilities</>,
-    imageUrl: 'img/resource-scheduling.png',
+    imageUrl: '/img/resource-scheduling.png',
     description: (
       <>
       Builtin with comprehensive scheduling capabilities, including hierarchy
@@ -34,7 +33,7 @@ const features = [
   },
   {
     title: <>Resource Scheduler for K8s</>,
-    imageUrl: 'img/support-k8s.png',
+    imageUrl: '/img/support-k8s.png',
     description: (
       <>
       Fully K8s compatible, an alternative of the default K8s
@@ -44,7 +43,7 @@ const features = [
   },
   {
     title: <>Cloud Native</>,
-    imageUrl: 'img/cloud-native.png',
+    imageUrl: '/img/cloud-native.png',
     description: (
       <>
       Supports both on-prem and on-cloud use cases. When running on cloud, it works
@@ -60,7 +59,7 @@ function Home() {
   return (
     <Layout
       title={`Welcome to ${siteConfig.title}`}
-      description="Description will go into a meta tag in <head />">
+      description="Apache YuniKorn (incubating) website">
         <header className={classnames('hero', styles.heroBanner)}>
             <div className="container">
                 <div className="row">
@@ -76,7 +75,7 @@ function Home() {
                         </div>
                     </div>
                     <div className='hero_column_logo'>
-                        <img className={styles.heroImg} src="img/yunikorn_classic_logo.png"/>
+                        <img className={styles.heroImg} src="/img/logo/yunikorn_classic_logo.png"/>
                     </div>
                 </div>
             </div>
@@ -105,7 +104,7 @@ function Home() {
             <section className="features_src-pages-">
                 <div className="container">
                     <h3 className="text--center">Project Timeline</h3>
-                    <img src="/img/project-timeline.png" className={styles.timelineImage}/>
+                    <img src="img/project-timeline.png" className={styles.timelineImage}/>
                 </div>
             </section>
         </main>
diff --git a/static/img/logo/yunikorn-log-gray.png b/static/img/logo/yunikorn-log-gray.png
deleted file mode 100644
index 62850fd..0000000
Binary files a/static/img/logo/yunikorn-log-gray.png and /dev/null differ
diff --git a/static/img/yunikorn_white_256.png b/static/img/logo/yunikorn-logo-gray.png
similarity index 100%
copy from static/img/yunikorn_white_256.png
copy to static/img/logo/yunikorn-logo-gray.png
diff --git a/static/img/yunikorn_black_256.png b/static/img/logo/yunikorn_black_256.png
similarity index 100%
rename from static/img/yunikorn_black_256.png
rename to static/img/logo/yunikorn_black_256.png
diff --git a/static/img/yunikorn_black_white_256.png b/static/img/logo/yunikorn_black_white_256.png
similarity index 100%
rename from static/img/yunikorn_black_white_256.png
rename to static/img/logo/yunikorn_black_white_256.png
diff --git a/static/img/yunikorn_blue_256.png b/static/img/logo/yunikorn_blue_256.png
similarity index 100%
rename from static/img/yunikorn_blue_256.png
rename to static/img/logo/yunikorn_blue_256.png
diff --git a/static/img/yunikorn_classic_logo.png b/static/img/logo/yunikorn_classic_logo.png
similarity index 100%
rename from static/img/yunikorn_classic_logo.png
rename to static/img/logo/yunikorn_classic_logo.png
diff --git a/static/img/yunikorn_logo.svg b/static/img/logo/yunikorn_logo.svg
similarity index 100%
rename from static/img/yunikorn_logo.svg
rename to static/img/logo/yunikorn_logo.svg
diff --git a/static/img/yunikorn_white_256.png b/static/img/logo/yunikorn_white_256.png
similarity index 100%
rename from static/img/yunikorn_white_256.png
rename to static/img/logo/yunikorn_white_256.png
diff --git a/static/img/yunikorn_white_logo.png b/static/img/logo/yunikorn_white_logo.png
similarity index 100%
rename from static/img/yunikorn_white_logo.png
rename to static/img/logo/yunikorn_white_logo.png
diff --git a/static/img/screenshot-apps.png b/static/img/screenshot-apps.png
deleted file mode 100644
index 577098b..0000000
Binary files a/static/img/screenshot-apps.png and /dev/null differ
diff --git a/static/img/screenshot-queues.png b/static/img/screenshot-queues.png
deleted file mode 100644
index 72d8c43..0000000
Binary files a/static/img/screenshot-queues.png and /dev/null differ
diff --git a/static/img/ui-screenshots.gif b/static/img/ui-screenshots.gif
deleted file mode 100644
index 2a1c89f..0000000
Binary files a/static/img/ui-screenshots.gif and /dev/null differ
diff --git a/static/img/cpu_profile.jpg b/versioned_docs/version-0.8.0/assets/cpu_profile.jpg
similarity index 100%
copy from static/img/cpu_profile.jpg
copy to versioned_docs/version-0.8.0/assets/cpu_profile.jpg
diff --git a/static/img/dashboard_secret.png b/versioned_docs/version-0.8.0/assets/dashboard_secret.png
similarity index 100%
copy from static/img/dashboard_secret.png
copy to versioned_docs/version-0.8.0/assets/dashboard_secret.png
diff --git a/static/img/dashboard_token_select.png b/versioned_docs/version-0.8.0/assets/dashboard_token_select.png
similarity index 100%
copy from static/img/dashboard_token_select.png
copy to versioned_docs/version-0.8.0/assets/dashboard_token_select.png
diff --git a/static/img/docker-dektop-minikube.png b/versioned_docs/version-0.8.0/assets/docker-dektop-minikube.png
similarity index 100%
copy from static/img/docker-dektop-minikube.png
copy to versioned_docs/version-0.8.0/assets/docker-dektop-minikube.png
diff --git a/static/img/docker-desktop.png b/versioned_docs/version-0.8.0/assets/docker-desktop.png
similarity index 100%
copy from static/img/docker-desktop.png
copy to versioned_docs/version-0.8.0/assets/docker-desktop.png
diff --git a/static/img/goland_debug.jpg b/versioned_docs/version-0.8.0/assets/goland_debug.jpg
similarity index 100%
copy from static/img/goland_debug.jpg
copy to versioned_docs/version-0.8.0/assets/goland_debug.jpg
diff --git a/versioned_docs/version-0.8.0/assets/goland_ide_pref_imports.png b/versioned_docs/version-0.8.0/assets/goland_ide_pref_imports.png
new file mode 100644
index 0000000..fbd9b00
Binary files /dev/null and b/versioned_docs/version-0.8.0/assets/goland_ide_pref_imports.png differ
diff --git a/versioned_docs/version-0.8.0/assets/goland_ide_pref_inspections.png b/versioned_docs/version-0.8.0/assets/goland_ide_pref_inspections.png
new file mode 100644
index 0000000..395e640
Binary files /dev/null and b/versioned_docs/version-0.8.0/assets/goland_ide_pref_inspections.png differ
diff --git a/versioned_docs/version-0.8.0/assets/goland_ide_pref_other.png b/versioned_docs/version-0.8.0/assets/goland_ide_pref_other.png
new file mode 100644
index 0000000..77e9908
Binary files /dev/null and b/versioned_docs/version-0.8.0/assets/goland_ide_pref_other.png differ
diff --git a/versioned_docs/version-0.8.0/assets/goland_ide_pref_tabs.png b/versioned_docs/version-0.8.0/assets/goland_ide_pref_tabs.png
new file mode 100644
index 0000000..f6b741a
Binary files /dev/null and b/versioned_docs/version-0.8.0/assets/goland_ide_pref_tabs.png differ
diff --git a/static/img/node-bin-packing.png b/versioned_docs/version-0.8.0/assets/node-bin-packing.png
similarity index 100%
copy from static/img/node-bin-packing.png
copy to versioned_docs/version-0.8.0/assets/node-bin-packing.png
diff --git a/static/img/node-fair.png b/versioned_docs/version-0.8.0/assets/node-fair.png
similarity index 100%
copy from static/img/node-fair.png
copy to versioned_docs/version-0.8.0/assets/node-fair.png
diff --git a/static/img/pluggable-app-mgmt.jpg b/versioned_docs/version-0.8.0/assets/pluggable-app-mgmt.jpg
similarity index 100%
copy from static/img/pluggable-app-mgmt.jpg
copy to versioned_docs/version-0.8.0/assets/pluggable-app-mgmt.jpg
diff --git a/static/img/queue-fairness.png b/versioned_docs/version-0.8.0/assets/queue-fairness.png
similarity index 100%
copy from static/img/queue-fairness.png
copy to versioned_docs/version-0.8.0/assets/queue-fairness.png
diff --git a/static/img/resilience-node-recovery.jpg b/versioned_docs/version-0.8.0/assets/resilience-node-recovery.jpg
similarity index 100%
copy from static/img/resilience-node-recovery.jpg
copy to versioned_docs/version-0.8.0/assets/resilience-node-recovery.jpg
diff --git a/static/img/resilience-workflow.jpg b/versioned_docs/version-0.8.0/assets/resilience-workflow.jpg
similarity index 100%
copy from static/img/resilience-workflow.jpg
copy to versioned_docs/version-0.8.0/assets/resilience-workflow.jpg
diff --git a/static/img/throughput.png b/versioned_docs/version-0.8.0/assets/throughput.png
similarity index 100%
copy from static/img/throughput.png
copy to versioned_docs/version-0.8.0/assets/throughput.png
diff --git a/versioned_docs/version-0.8.0/community/coding_guidelines.md b/versioned_docs/version-0.8.0/community/coding_guidelines.md
index 8edb934..dc04365 100644
--- a/versioned_docs/version-0.8.0/community/coding_guidelines.md
+++ b/versioned_docs/version-0.8.0/community/coding_guidelines.md
@@ -115,11 +115,11 @@ There are three tabs to configure, the first two are crucial to comply with the
 
 | ||
 | -------- | ---------- |
-| Tabs |![tabs](/img/goland_ide_pref_tabs.png)|
-| Imports |![imports](/img/goland_ide_pref_imports.png)|
-| Other |![other](/img/goland_ide_pref_other.png)|
+| Tabs |![tabs](./../assets/goland_ide_pref_tabs.png)|
+| Imports |![imports](./../assets/goland_ide_pref_imports.png)|
+| Other |![other](./../assets/goland_ide_pref_other.png)|
 
 ### Inspections
 The default inspections except for one that helps highlight shadowing variables. 
 
-![inspections](/img/goland_ide_pref_inspections.png)
+![inspections](./../assets/goland_ide_pref_inspections.png)
diff --git a/versioned_docs/version-0.8.0/design/pluggable_app_management.md b/versioned_docs/version-0.8.0/design/pluggable_app_management.md
index 45c3872..ed54017 100644
--- a/versioned_docs/version-0.8.0/design/pluggable_app_management.md
+++ b/versioned_docs/version-0.8.0/design/pluggable_app_management.md
@@ -60,7 +60,7 @@ The key issue here is we need a app-management interface, that can be easily ext
 It needs to be decoupled with existing scheduling logic. For each operator, we create a service to manage this type app's lifecycle,
 and communicate with the scheduling cache independently. The high-level design looks like below:
 
-![Pluggable App Management](/img/pluggable-app-mgmt.jpg)
+![Pluggable App Management](./../assets/pluggable-app-mgmt.jpg)
 
 Where
 - `AppManagementService` is a composite set of services that can be managed together.
diff --git a/versioned_docs/version-0.8.0/design/resilience.md b/versioned_docs/version-0.8.0/design/resilience.md
index 7c3834a..d3789ef 100644
--- a/versioned_docs/version-0.8.0/design/resilience.md
+++ b/versioned_docs/version-0.8.0/design/resilience.md
@@ -62,7 +62,7 @@ New -----------> Registered -----------> Recovering ----------> Running
 
 Following chart illustrate how yunikorn-core and shim works together on recovery.
 
-![Workflow](/img/resilience-workflow.jpg)
+![Workflow](./../assets/resilience-workflow.jpg)
 
 Restart (with recovery) process
 - yunikorn-shim registers itself with yunikorn-core
@@ -91,7 +91,7 @@ In the shim layer, it maintains states for each node and pods running on this no
 all nodes initially are considered as under `recovering`. Only when all pods running on this node are fully recovered,
 the node can be considered as `recovered`.
 
-![node-recovery](/img/resilience-node-recovery.jpg)
+![node-recovery](./../assets/resilience-node-recovery.jpg)
 
 Like demonstrated on upon diagram,
 
diff --git a/versioned_docs/version-0.8.0/download.md b/versioned_docs/version-0.8.0/download.md
deleted file mode 100644
index 770e791..0000000
--- a/versioned_docs/version-0.8.0/download.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-id: download
-title: Apache YuniKorn (Incubating)
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-
-Apache YuniKorn (Incubating) is released as source code tarballs. The downloads are distributed via mirror sites and should be checked for tampering using GPG or SHA-512.
-
-The latest release of Apache YuniKorn is v0.8.0.
-
-|  Version   | Release date  | Source download  | Docker images  | Release notes  |
-|  ----  | ----  | ----  | ----  | ----  |
-| v0.8.0  | 2020-05-04 | [Download](https://www.apache.org/dyn/closer.cgi/incubator/yunikorn/0.8.0-incubating/apache-yunikorn-0.8.0-incubating-src.tar.gz) ([Checksum](https://downloads.apache.org/incubator/yunikorn/0.8.0-incubating/apache-yunikorn-0.8.0-incubating-src.tar.gz.sha512) [Signature](https://downloads.apache.org/incubator/yunikorn/0.8.0-incubating/apache-yunikorn-0.8.0-incubating-src.tar.gz.asc)) | [Scheduler](https://hub.docker.com/layers/apache/yunikorn/scheduler-0.8.0/image [...]
-
-
-## Verifying the signature
-
-To verify the Apache YuniKorn release using GPG:
-
-- Download the release apache-yunikorn-X.Y.Z-incubating-src.tar.gz from a mirror site.
-- Download the signature file apache-yunikorn-X.Y.Z-incubating-src.tar.gz.asc from Apache.
-- Download the Apache YuniKorn [KEYS](https://downloads.apache.org/incubator/yunikorn/KEYS) file.
-- gpg –import KEYS
-- gpg –verify apache-yunikorn-X.Y.Z-incubating-X.Y.Z-src.tar.gz.asc
-
-## Verifying the checksum
-
-To verify the integrity of Apache YuniKorn release using the SHA-512 checksum:
-
-- Download the release apache-yunikorn-X.Y.Z-incubating-X.Y.Z-src.tar.gz from a mirror site.
-- Download the checksum apache-yunikorn-X.Y.Z-incubating-X.Y.Z-src.tar.gz.sha512 from Apache.
-- shasum –a 512 apache-yunikorn-X.Y.Z-incubating-X.Y.Z-src.tar.gz
-
-## All releases
-
-You can find all previous releases in the [Archive incubating repository](https://archive.apache.org/dist/incubator/yunikorn/).
-
-## License
-
-The software is licensed under [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0)
-
-
-
diff --git a/versioned_docs/version-0.8.0/performance/evaluate_perf_function_with_kubemark.md b/versioned_docs/version-0.8.0/performance/evaluate_perf_function_with_kubemark.md
index 07abced..de9184d 100644
--- a/versioned_docs/version-0.8.0/performance/evaluate_perf_function_with_kubemark.md
+++ b/versioned_docs/version-0.8.0/performance/evaluate_perf_function_with_kubemark.md
@@ -35,7 +35,7 @@ In YuniKorn, we have done lots of optimizations to improve the performance, such
 and low-latency sorting policies. The following chart reveals the scheduler throughput (by using Kubemark simulated
 environment, and submitting 50,000 pods), comparing to the K8s default scheduler.
 
-![Scheduler Throughput](/img/throughput.png)
+![Scheduler Throughput](./../assets/throughput.png)
 
 The charts record the time spent until all pods are running on the cluster
 
@@ -51,7 +51,7 @@ Each of YuniKorn queues has its guaranteed and maximum capacity. When we have lo
 YuniKorn ensures each of them gets its fair share. When we monitor the resource usage of these queues, we can clearly
 see how fairness was enforced:
 
-![Scheduler Throughput](/img/queue-fairness.png)
+![Scheduler Throughput](./../assets/queue-fairness.png)
 
 We set up 4 heterogeneous queues on this cluster, and submit different workloads against these queues.
 From the chart, we can see the queue resources are increasing nearly in the same trend, which means the resource
@@ -82,7 +82,7 @@ This means at the given time, this cluster has 100 nodes whose utilization is in
 it has 300 nodes whose utilization is in the range 10% - 20%, and so on… Now, we run lots of workloads and
 collect metrics, see the below chart:
 
-<img src="/img/node-fair.png" />
+<img src="./../assets/node-fair.png" />
 
 We can see all nodes have 0% utilization, and then all of them move to bucket-1, then bucket-2 … and eventually
 all nodes moved to bucket-9, which means all capacity is used. In another word, nodes’ resource has been used in
@@ -92,7 +92,7 @@ a fairness manner.
 
 This is When the bin-packing policy is enabled, we can see the following pattern:
 
-<img src="/img/node-bin-packing.png" />
+<img src="./../assets/node-bin-packing.png" />
 
 On the contrary, all nodes are moving between 2 buckets, bucket-0 and bucket-9. Nodes in bucket-0 (0% - 10%)
 are decreasing in a linear manner, and nodes in bucket-9 (90% - 100%) are increasing with the same curve.
diff --git a/versioned_docs/version-0.8.0/performance/profiling.md b/versioned_docs/version-0.8.0/performance/profiling.md
index e224d57..52a9182 100644
--- a/versioned_docs/version-0.8.0/performance/profiling.md
+++ b/versioned_docs/version-0.8.0/performance/profiling.md
@@ -62,7 +62,7 @@ you can type command such as `web` or `gif` to get a graph that helps you better
 understand the overall performance on critical code paths. You can get something
 like below:
 
-![CPU Profiling](/img/cpu_profile.jpg)
+![CPU Profiling](./../assets/cpu_profile.jpg)
 
 Note, in order to use these
 options, you need to install the virtualization tool `graphviz` first, if you are using Mac, simply run `brew install graphviz`, for more info please refer [here](https://graphviz.gitlab.io/).
diff --git a/versioned_docs/version-0.8.0/roadmap.md b/versioned_docs/version-0.8.0/roadmap.md
deleted file mode 100644
index 768e3f4..0000000
--- a/versioned_docs/version-0.8.0/roadmap.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-id: roadmap
-title: Roadmap
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-
-# YuniKorn Roadmap
-
-## What's next
-
-**yunikorn-core**
-
-* [YUNIKORN-1](https://issues.apache.org/jira/browse/YUNIKORN-1): Support app/task priority aware scheduling.
-* [YUNIKORN-2](https://issues.apache.org/jira/browse/YUNIKORN-2): Gang Scheduling.
-* [YUNIKORN-21](https://issues.apache.org/jira/browse/YUNIKORN-21): Optimize node sorting algorithms.
-* [YUNIKORN-42](https://issues.apache.org/jira/browse/YUNIKORN-42): High efficient scheduling events framework phase 1.
-* [YUNIKORN-33](https://issues.apache.org/jira/browse/YUNIKORN-33): Performance benchmark with Kubemark.
-* [YUNIKORN-131](https://issues.apache.org/jira/browse/YUNIKORN-131): Prometheus integration - phase 2.
-
-**yunikorn-k8shim**
-
-* [YUNIKORN-133](https://issues.apache.org/jira/browse/YUNIKORN-133): Performance improvement: optimize predicate function performance.
-* [YUNIKORN-42](https://issues.apache.org/jira/browse/YUNIKORN-42): Publish comprehensive scheduler events to K8s event system. 
-
-**yunikorn-web**
-
-* [YUNIKORN-83](https://issues.apache.org/jira/browse/YUNIKORN-83): Implements the nodes info page.
-
-## v0.8.0 (May 4, 2020)
-
-This release ships a fully functional resource scheduler for Kubernetes with a number of useful features that empower
-to run Big Data workloads on K8s. See more at [Release Notes](http://yunikorn.apache.org/release/v0.8.0.html).
-
-**yunikorn-scheduler-interface**
-
-* Communication protocols between RM and scheduler-shim.
-* gRPC interfaces.
-* Scheduler plugin interfaces.
-
-**yunikorn-core**
-
-* Hierarchy queues with min/max resource quotas.
-* Resource fairness between queues, users and apps.
-* Cross-queue preemption based on fairness.
-* Fair/Bin-packing scheduling policies.
-* Placement rules (auto queue creation/mapping).
-* Customized resource types (like GPU) scheduling support.
-* Rich placement constraints support.
-* Automatically map incoming container requests to queues by policies. 
-* Node partition: partition cluster to sub-clusters with dedicated quota/ACL management.
-* Configuration hot-refresh.
-* Stateful recovery.
-* Metrics framework.
-
-**yunikorn-k8shim**
-
-* Support K8s predicates. Such as pod affinity/anti-affinity, node selectors.
-* Support Persistent Volumes, Persistent Volume Claims, etc.
-* Load scheduler configuration from configmap dynamically (hot-refresh).
-* 3rd Operator/controller integration, pluggable app discovery.
-* Helm chart support.
-
-**yunikorn-web**
-
-* Cluster overview page with brief info about the cluster.
-* Read-only application view, including app info and task breakdown info.
-* Read-only queue view, displaying queue structure, queue resource, usage info dynamically.
diff --git a/versioned_docs/version-0.8.0/setup/env_setup.md b/versioned_docs/version-0.8.0/setup/env_setup.md
index 28bb2e2..c117e18 100644
--- a/versioned_docs/version-0.8.0/setup/env_setup.md
+++ b/versioned_docs/version-0.8.0/setup/env_setup.md
@@ -37,7 +37,7 @@ Just simply follow the instruction [here](https://docs.docker.com/docker-for-mac
 
 Once Kubernetes is started in docker desktop, you should see something similar below:
 
-![Kubernetes in Docker Desktop](/img/docker-desktop.png)
+![Kubernetes in Docker Desktop](./../assets/docker-desktop.png)
 
 This means that:
 1. Kubernetes is running.
@@ -68,9 +68,9 @@ The dashboard as deployed in the previous step requires a token or config to sig
     ```
 3. copy the token value which is part of the `Data` section with the tag `token`.
 4. select the **Token** option in the dashboard web UI:<br/>
-    ![Token Access in dashboard](/img/dashboard_token_select.png)
+    ![Token Access in dashboard](./../assets/dashboard_token_select.png)
 5. paste the token value into the input box and sign in:<br/>
-    ![Token Access in dashboard](/img/dashboard_secret.png)
+    ![Token Access in dashboard](./../assets/dashboard_secret.png)
 
 ## Local Kubernetes cluster with Minikube
 Minikube can be added to an existing Docker Desktop install. Minikube can either use the pre-installed hypervisor or use a hypervisor of choice. These instructions use [HyperKit](https://github.com/moby/hyperkit) which is embedded in Docker Desktop.   
@@ -94,7 +94,7 @@ Check hypervisor
 Docker Desktop should have already installed HyperKit. In a
     ```
 1. update the minikube config to default to the HyperKit install
 `minikube config set vm-driver hyperkit`
 1. change docker desktop to use minikube for Kubernetes:<br/>
-    ![Kubernetes in Docker Desktop: minikube setting](/img/docker-dektop-minikube.png)
+    ![Kubernetes in Docker Desktop: minikube setting](./../assets/docker-dektop-minikube.png)
 
 ### Deploy and access the cluster
 After the installation is done you can start a new cluster.
@@ -116,7 +116,7 @@ Note, this instruction requires you have GoLand IDE for development.
 In GoLand, go to yunikorn-k8shim project. Then click "Run" -> "Debug..." -> "Edit Configuration..." to get the pop-up configuration window.
 Note, you need to click "+" to create a new profile if the `Go Build` option is not available at the first time.
 
-![Debug Configuration](/img/goland_debug.jpg)
+![Debug Configuration](./../assets/goland_debug.jpg)
 
 The highlighted fields are the configurations you need to add. These include:
 
diff --git a/versioned_docs/version-0.9.0/api/cluster.md b/versioned_docs/version-0.9.0/api/cluster.md
new file mode 100644
index 0000000..bae308c
--- /dev/null
+++ b/versioned_docs/version-0.9.0/api/cluster.md
@@ -0,0 +1,62 @@
+---
+id: cluster
+title: Cluster
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+## Clusters
+
+Returns general information about the clusters managed by the YuniKorn Scheduler. Information includes number of (total, failed, pending, running, completed) applications and containers.  
+
+**URL** : `/ws/v1/clusters`
+
+**Method** : `GET`
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+As an example, here is a response from a 2-node cluster with 3 applications and 4 running containers.
+
+```json
+[
+    {
+        "clusterName": "kubernetes",
+        "totalApplications": "3",
+        "failedApplications": "1",
+        "pendingApplications": "",
+        "runningApplications": "3",
+        "completedApplications": "",
+        "totalContainers": "4",
+        "failedContainers": "",
+        "pendingContainers": "",
+        "runningContainers": "4",
+        "activeNodes": "2",
+        "totalNodes": "2",
+        "failedNodes": ""
+    }
+]
+```
+		
\ No newline at end of file
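[Editor's note] The endpoint documented in this new file, and the `/ws/v1/queues`, `/ws/v1/apps` and `/ws/v1/nodes` endpoints in the scheduler page below, can be queried directly once the REST port is reachable. A minimal sketch, assuming the scheduler's REST service is reachable on localhost:9080 (the default noted in get_started.md):

```shell
# Cluster-level summary documented in this file.
curl -s http://localhost:9080/ws/v1/clusters

# Applications filtered by fully qualified queue name, as described in the scheduler API page.
curl -s "http://localhost:9080/ws/v1/apps?queue=root.default"
```
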
diff --git a/versioned_docs/version-0.9.0/api/scheduler.md b/versioned_docs/version-0.9.0/api/scheduler.md
new file mode 100644
index 0000000..d8e9452
--- /dev/null
+++ b/versioned_docs/version-0.9.0/api/scheduler.md
@@ -0,0 +1,517 @@
+---
+id: scheduler
+title: Scheduler
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+## Queues
+
+Displays general information about the queues like name, status, capacities and properties. 
+The queues' hierarchy is kept in the response json.  
+
+**URL** : `/ws/v1/queues`
+
+**Method** : `GET`
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+For the default queue hierarchy (only `root.default` leaf queue exists) a similar response to the following is sent back to the client:
+
+```json
+{
+    "partitionName": "[mycluster]default",
+    "capacity": {
+        "capacity": "map[ephemeral-storage:75850798569 hugepages-1Gi:0 hugepages-2Mi:0 memory:80000 pods:110 vcore:60000]",
+        "usedcapacity": "0"
+    },
+    "nodes": null,
+    "queues": {
+        "queuename": "root",
+        "status": "Active",
+        "capacities": {
+            "capacity": "[]",
+            "maxcapacity": "[ephemeral-storage:75850798569 hugepages-1Gi:0 hugepages-2Mi:0 memory:80000 pods:110 vcore:60000]",
+            "usedcapacity": "[memory:8000 vcore:8000]",
+            "absusedcapacity": "[memory:54 vcore:80]"
+        },
+        "queues": [
+            {
+                "queuename": "default",
+                "status": "Active",
+                "capacities": {
+                    "capacity": "[]",
+                    "maxcapacity": "[]",
+                    "usedcapacity": "[memory:8000 vcore:8000]",
+                    "absusedcapacity": "[]"
+                },
+                "queues": null,
+                "properties": {}
+            }
+        ],
+        "properties": {}
+    }
+}
+```
+
+## Applications
+
+Displays general information about the applications like used resources, queue name, submission time and allocations.
+
+**URL** : `/ws/v1/apps`
+
+**Method** : `GET`
+
+**Query Params** : 
+
+1. queue=<fully qualified queue name\>
+
+The fully qualified queue name used to filter the applications that run within the given queue. For example, "/ws/v1/apps?queue=root.default" returns the applications running in "root.default" queue.
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+In the example below there are three allocations belonging to two applications. 
+
+```json
+[
+    {
+        "applicationID": "application-0002",
+        "usedResource": "[memory:4000 vcore:4000]",
+        "partition": "[mycluster]default",
+        "queueName": "root.default",
+        "submissionTime": 1595939756253216000,
+        "allocations": [
+            {
+                "allocationKey": "deb12221-6b56-4fe9-87db-ebfadce9aa20",
+                "allocationTags": null,
+                "uuid": "9af35d44-2d6f-40d1-b51d-758859e6b8a8",
+                "resource": "[memory:4000 vcore:4000]",
+                "priority": "<nil>",
+                "queueName": "root.default",
+                "nodeId": "node-0001",
+                "applicationId": "application-0002",
+                "partition": "default"
+            }
+        ],
+        "applicationState": "Running"
+    },
+    {
+        "applicationID": "application-0001",
+        "usedResource": "[memory:4000 vcore:4000]",
+        "partition": "[mycluster]default",
+        "queueName": "root.default",
+        "submissionTime": 1595939756253460000,
+        "allocations": [
+            {
+                "allocationKey": "54e5d77b-f4c3-4607-8038-03c9499dd99d",
+                "allocationTags": null,
+                "uuid": "08033f9a-4699-403c-9204-6333856b41bd",
+                "resource": "[memory:2000 vcore:2000]",
+                "priority": "<nil>",
+                "queueName": "root.default",
+                "nodeId": "node-0001",
+                "applicationId": "application-0001",
+                "partition": "default"
+            },
+            {
+                "allocationKey": "af3bd2f3-31c5-42dd-8f3f-c2298ebdec81",
+                "allocationTags": null,
+                "uuid": "96beeb45-5ed2-4c19-9a83-2ac807637b3b",
+                "resource": "[memory:2000 vcore:2000]",
+                "priority": "<nil>",
+                "queueName": "root.default",
+                "nodeId": "node-0002",
+                "applicationId": "application-0001",
+                "partition": "default"
+            }
+        ],
+        "applicationState": "Running"
+    }
+]
+```
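+
+As a hedged example, the Go sketch below queries this endpoint with the `queue` filter described above and prints a one-line summary per application. The host and port (`localhost:9080`) are an assumption; the JSON fields mirror the example response.
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "net/url"
+)
+
+// AppInfo mirrors a subset of the fields in the example response.
+type AppInfo struct {
+    ApplicationID    string `json:"applicationID"`
+    UsedResource     string `json:"usedResource"`
+    QueueName        string `json:"queueName"`
+    SubmissionTime   int64  `json:"submissionTime"`
+    ApplicationState string `json:"applicationState"`
+}
+
+func main() {
+    // Filter on the fully qualified queue name, e.g. root.default.
+    endpoint := "http://localhost:9080/ws/v1/apps?queue=" + url.QueryEscape("root.default")
+    resp, err := http.Get(endpoint)
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    var apps []AppInfo
+    if err := json.NewDecoder(resp.Body).Decode(&apps); err != nil {
+        panic(err)
+    }
+    for _, app := range apps {
+        fmt.Printf("%s queue=%s state=%s used=%s\n",
+            app.ApplicationID, app.QueueName, app.ApplicationState, app.UsedResource)
+    }
+}
+```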
+
+## Nodes
+
+Displays general information about the nodes managed by YuniKorn. 
+Node details include host and rack name, capacity, resources and allocations.
+
+**URL** : `/ws/v1/nodes`
+
+**Method** : `GET`
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+Here is an example response from a 2-node cluster with 3 allocations.
+
+```json
+[
+    {
+        "partitionName": "[mycluster]default",
+        "nodesInfo": [
+            {
+                "nodeID": "node-0001",
+                "hostName": "",
+                "rackName": "",
+                "capacity": "[ephemeral-storage:75850798569 hugepages-1Gi:0 hugepages-2Mi:0 memory:14577 pods:110 vcore:10000]",
+                "allocated": "[memory:6000 vcore:6000]",
+                "occupied": "[memory:154 vcore:750]",
+                "available": "[ephemeral-storage:75850798569 hugepages-1Gi:0 hugepages-2Mi:0 memory:6423 pods:110 vcore:1250]",
+                "allocations": [
+                    {
+                        "allocationKey": "54e5d77b-f4c3-4607-8038-03c9499dd99d",
+                        "allocationTags": null,
+                        "uuid": "08033f9a-4699-403c-9204-6333856b41bd",
+                        "resource": "[memory:2000 vcore:2000]",
+                        "priority": "<nil>",
+                        "queueName": "root.default",
+                        "nodeId": "node-0001",
+                        "applicationId": "application-0001",
+                        "partition": "default"
+                    },
+                    {
+                        "allocationKey": "deb12221-6b56-4fe9-87db-ebfadce9aa20",
+                        "allocationTags": null,
+                        "uuid": "9af35d44-2d6f-40d1-b51d-758859e6b8a8",
+                        "resource": "[memory:4000 vcore:4000]",
+                        "priority": "<nil>",
+                        "queueName": "root.default",
+                        "nodeId": "node-0001",
+                        "applicationId": "application-0002",
+                        "partition": "default"
+                    }
+                ],
+                "schedulable": true
+            },
+            {
+                "nodeID": "node-0002",
+                "hostName": "",
+                "rackName": "",
+                "capacity": "[ephemeral-storage:75850798569 hugepages-1Gi:0 hugepages-2Mi:0 memory:14577 pods:110 vcore:10000]",
+                "allocated": "[memory:2000 vcore:2000]",
+                "occupied": "[memory:154 vcore:750]",
+                "available": "[ephemeral-storage:75850798569 hugepages-1Gi:0 hugepages-2Mi:0 memory:6423 pods:110 vcore:1250]",
+                "allocations": [
+                    {
+                        "allocationKey": "af3bd2f3-31c5-42dd-8f3f-c2298ebdec81",
+                        "allocationTags": null,
+                        "uuid": "96beeb45-5ed2-4c19-9a83-2ac807637b3b",
+                        "resource": "[memory:2000 vcore:2000]",
+                        "priority": "<nil>",
+                        "queueName": "root.default",
+                        "nodeId": "node-0002",
+                        "applicationId": "application-0001",
+                        "partition": "default"
+                    }
+                ],
+                "schedulable": true
+            }
+        ]
+    }
+]
+```
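+
+The same pattern works for the nodes endpoint; the hedged Go sketch below lists each node with its allocation count and schedulable flag. The host and port are again an assumption, and the fields mirror the example response.
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+)
+
+type Allocation struct {
+    AllocationKey string `json:"allocationKey"`
+    Resource      string `json:"resource"`
+    ApplicationID string `json:"applicationId"`
+}
+
+type NodeInfo struct {
+    NodeID      string       `json:"nodeID"`
+    Allocated   string       `json:"allocated"`
+    Available   string       `json:"available"`
+    Schedulable bool         `json:"schedulable"`
+    Allocations []Allocation `json:"allocations"`
+}
+
+type PartitionNodes struct {
+    PartitionName string     `json:"partitionName"`
+    NodesInfo     []NodeInfo `json:"nodesInfo"`
+}
+
+func main() {
+    resp, err := http.Get("http://localhost:9080/ws/v1/nodes")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    var partitions []PartitionNodes
+    if err := json.NewDecoder(resp.Body).Decode(&partitions); err != nil {
+        panic(err)
+    }
+    for _, p := range partitions {
+        fmt.Println("partition:", p.PartitionName)
+        for _, n := range p.NodesInfo {
+            fmt.Printf("  %s schedulable=%t allocations=%d allocated=%s\n",
+                n.NodeID, n.Schedulable, len(n.Allocations), n.Allocated)
+        }
+    }
+}
+```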
+
+## Goroutines info
+
+Dumps the stack traces of the currently running goroutines.
+
+**URL** : `/ws/v1/stack`
+
+**Method** : `GET`
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```text
+goroutine 356 [running
+]:
+github.com/apache/incubator-yunikorn-core/pkg/webservice.getStackInfo.func1(0x30a0060,
+0xc003e900e0,
+0x2)
+	/yunikorn/go/pkg/mod/github.com/apache/incubator-yunikorn-core@v0.0.0-20200717041747-f3e1c760c714/pkg/webservice/handlers.go: 41 +0xab
+github.com/apache/incubator-yunikorn-core/pkg/webservice.getStackInfo(0x30a0060,
+0xc003e900e0,
+0xc00029ba00)
+	/yunikorn/go/pkg/mod/github.com/apache/incubator-yunikorn-core@v0.0.0-20200717041747-f3e1c760c714/pkg/webservice/handlers.go: 48 +0x71
+net/http.HandlerFunc.ServeHTTP(0x2df0e10,
+0x30a0060,
+0xc003e900e0,
+0xc00029ba00)
+	/usr/local/go/src/net/http/server.go: 1995 +0x52
+github.com/apache/incubator-yunikorn-core/pkg/webservice.Logger.func1(0x30a0060,
+0xc003e900e0,
+0xc00029ba00)
+	/yunikorn/go/pkg/mod/github.com/apache/incubator-yunikorn-core@v0.0.0-20200717041747-f3e1c760c714/pkg/webservice/webservice.go: 65 +0xd4
+net/http.HandlerFunc.ServeHTTP(0xc00003a570,
+0x30a0060,
+0xc003e900e0,
+0xc00029ba00)
+	/usr/local/go/src/net/http/server.go: 1995 +0x52
+github.com/gorilla/mux.(*Router).ServeHTTP(0xc00029cb40,
+0x30a0060,
+0xc003e900e0,
+0xc0063fee00)
+	/yunikorn/go/pkg/mod/github.com/gorilla/mux@v1.7.3/mux.go: 212 +0x140
+net/http.serverHandler.ServeHTTP(0xc0000df520,
+0x30a0060,
+0xc003e900e0,
+0xc0063fee00)
+	/usr/local/go/src/net/http/server.go: 2774 +0xcf
+net/http.(*conn).serve(0xc0000eab40,
+0x30a61a0,
+0xc003b74000)
+	/usr/local/go/src/net/http/server.go: 1878 +0x812
+created by net/http.(*Server).Serve
+	/usr/local/go/src/net/http/server.go: 2884 +0x4c5
+
+goroutine 1 [chan receive,
+	26 minutes
+]:
+main.main()
+	/yunikorn/pkg/shim/main.go: 52 +0x67a
+
+goroutine 19 [syscall,
+	26 minutes
+]:
+os/signal.signal_recv(0x1096f91)
+	/usr/local/go/src/runtime/sigqueue.go: 139 +0x9f
+os/signal.loop()
+	/usr/local/go/src/os/signal/signal_unix.go: 23 +0x30
+created by os/signal.init.0
+	/usr/local/go/src/os/signal/signal_unix.go: 29 +0x4f
+
+...
+```
+
+## Metrics
+
+Endpoint to retrieve the scheduler metrics in Prometheus format.
+The metrics are dumped with help messages and type information.
+
+**URL** : `/ws/v1/metrics`
+
+**Method** : `GET`
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```text
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 2.567e-05
+go_gc_duration_seconds{quantile="0.25"} 3.5727e-05
+go_gc_duration_seconds{quantile="0.5"} 4.5144e-05
+go_gc_duration_seconds{quantile="0.75"} 6.0024e-05
+go_gc_duration_seconds{quantile="1"} 0.00022528
+go_gc_duration_seconds_sum 0.021561648
+go_gc_duration_seconds_count 436
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 82
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.12.17"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 9.6866248e+07
+
+...
+
+# HELP yunikorn_scheduler_vcore_nodes_usage Nodes resource usage, by resource name.
+# TYPE yunikorn_scheduler_vcore_nodes_usage gauge
+yunikorn_scheduler_vcore_nodes_usage{range="(10%, 20%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="(20%,30%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="(30%,40%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="(40%,50%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="(50%,60%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="(60%,70%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="(70%,80%]"} 1
+yunikorn_scheduler_vcore_nodes_usage{range="(80%,90%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="(90%,100%]"} 0
+yunikorn_scheduler_vcore_nodes_usage{range="[0,10%]"} 0
+```
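+
+The output follows the Prometheus text exposition format, so any Prometheus-compatible scraper can consume it directly. As a small illustration (host and port assumed), the Go sketch below fetches the endpoint and prints only the scheduler's own metric samples:
+
+```go
+package main
+
+import (
+    "bufio"
+    "fmt"
+    "net/http"
+    "strings"
+)
+
+func main() {
+    resp, err := http.Get("http://localhost:9080/ws/v1/metrics")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    scanner := bufio.NewScanner(resp.Body)
+    for scanner.Scan() {
+        line := scanner.Text()
+        // Skip HELP/TYPE comments and Go runtime metrics, keep yunikorn_* samples.
+        if strings.HasPrefix(line, "yunikorn_") {
+            fmt.Println(line)
+        }
+    }
+    if err := scanner.Err(); err != nil {
+        panic(err)
+    }
+}
+```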
+
+## Configuration validation
+
+**URL** : `/ws/v1/validate-conf`
+
+**Method** : `POST`
+
+**Auth required** : NO
+
+### Success response
+
+Regardless of whether the configuration is allowed or not, if the server was able to process the request it will yield a 200 HTTP status code.
+
+**Code** : `200 OK`
+
+#### Allowed configuration
+
+Sending the following simple configuration yields an accept:
+
+```yaml
+partitions:
+  - name: default
+    queues:
+      - name: root
+        queues:
+          - name: test
+```
+
+Response
+
+```json
+{
+    "allowed": true,
+    "reason": ""
+}
+```
+
+#### Disallowed configuration
+
+The following configuration is not allowed due to the `wrong_text` entry placed in the YAML file.
+
+```yaml
+partitions:
+  - name: default
+    queues:
+      - name: root
+        queues:
+          - name: test
+  - wrong_text
+```
+
+Response
+
+```json
+{
+    "allowed": false,
+    "reason": "yaml: unmarshal errors:\n  line 7: cannot unmarshal !!str `wrong_text` into configs.PartitionConfig"
+}
+```
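+
+As a hedged sketch, a client can validate a configuration before applying it by posting the YAML body and checking the `allowed`/`reason` fields in the JSON response; the host, port and content type below are assumptions.
+
+```go
+package main
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "strings"
+)
+
+const conf = `
+partitions:
+  - name: default
+    queues:
+      - name: root
+        queues:
+          - name: test
+`
+
+type ValidationResult struct {
+    Allowed bool   `json:"allowed"`
+    Reason  string `json:"reason"`
+}
+
+func main() {
+    resp, err := http.Post("http://localhost:9080/ws/v1/validate-conf",
+        "application/x-yaml", strings.NewReader(conf))
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    var result ValidationResult
+    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+        panic(err)
+    }
+    if result.Allowed {
+        fmt.Println("configuration accepted")
+    } else {
+        fmt.Println("configuration rejected:", result.Reason)
+    }
+}
+```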
+
+## Application history
+
+Endpoint to retrieve historical data about the number of total applications by timestamp.
+
+**URL** : `/ws/v1/history/apps`
+
+**Method** : `GET`
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```json
+[
+    {
+        "timestamp": 1595939966153460000,
+        "totalApplications": "1"
+    },
+    {
+        "timestamp": 1595940026152892000,
+        "totalApplications": "1"
+    },
+    {
+        "timestamp": 1595940086153799000,
+        "totalApplications": "2"
+    },
+    {
+        "timestamp": 1595940146154497000,
+        "totalApplications": "2"
+    },
+    {
+        "timestamp": 1595940206155187000,
+        "totalApplications": "2"
+    }
+]
+```
+
+## Container history
+
+Endpoint to retrieve historical data about the number of total containers by timestamp.
+
+**URL** : `/ws/v1/history/containers`
+
+**Method** : `GET`
+
+**Auth required** : NO
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```json
+[
+    {
+        "timestamp": 1595939966153460000,
+        "totalContainers": "1"
+    },
+    {
+        "timestamp": 1595940026152892000,
+        "totalContainers": "1"
+    },
+    {
+        "timestamp": 1595940086153799000,
+        "totalContainers": "3"
+    },
+    {
+        "timestamp": 1595940146154497000,
+        "totalContainers": "3"
+    },
+    {
+        "timestamp": 1595940206155187000,
+        "totalContainers": "3"
+    }
+]
+```
diff --git a/versioned_docs/version-0.9.0/api/system.md b/versioned_docs/version-0.9.0/api/system.md
new file mode 100644
index 0000000..1d685ff
--- /dev/null
+++ b/versioned_docs/version-0.9.0/api/system.md
@@ -0,0 +1,225 @@
+---
+id: system
+title: System
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+These endpoints are for the [pprof](https://github.com/google/pprof) profiling tool.
+
+## pprof
+
+**URL** : `/debug/pprof/`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```text
+/debug/pprof/
+
+Types of profiles available:
+Count	Profile
+273	allocs
+0	block
+0	cmdline
+78	goroutine
+273	heap
+0	mutex
+0	profile
+29	threadcreate
+0	trace
+full goroutine stack dump
+Profile Descriptions:
+
+allocs: A sampling of all past memory allocations
+block: Stack traces that led to blocking on synchronization primitives
+cmdline: The command line invocation of the current program
+goroutine: Stack traces of all current goroutines
+heap: A sampling of memory allocations of live objects. You can specify the gc GET parameter to run GC before taking the heap sample.
+mutex: Stack traces of holders of contended mutexes
+profile: CPU profile. You can specify the duration in the seconds GET parameter. After you get the profile file, use the go tool pprof command to investigate the profile.
+threadcreate: Stack traces that led to the creation of new OS threads
+trace: A trace of execution of the current program. You can specify the duration in the seconds GET parameter. After you get the trace file, use the go tool trace command to investigate the trace.
+```
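+
+As the descriptions above note, the `profile` and `trace` endpoints accept a `seconds` GET parameter. Below is a minimal Go sketch (host and port assumed) that captures a 30 second CPU profile and saves it for offline analysis with `go tool pprof`:
+
+```go
+package main
+
+import (
+    "fmt"
+    "io"
+    "net/http"
+    "os"
+)
+
+func main() {
+    // Capture a 30 second CPU profile via the seconds GET parameter.
+    resp, err := http.Get("http://localhost:9080/debug/pprof/profile?seconds=30")
+    if err != nil {
+        panic(err)
+    }
+    defer resp.Body.Close()
+
+    out, err := os.Create("cpu.pprof")
+    if err != nil {
+        panic(err)
+    }
+    defer out.Close()
+
+    if _, err := io.Copy(out, resp.Body); err != nil {
+        panic(err)
+    }
+    fmt.Println("profile written to cpu.pprof; inspect it with: go tool pprof cpu.pprof")
+}
+```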
+
+## Heap
+
+**URL** : `/debug/pprof/heap`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Thread create
+
+**URL** : `/debug/pprof/threadcreate`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Goroutine
+
+**URL** : `/debug/pprof/goroutine`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Allocations
+
+**URL** : `/debug/pprof/allocs`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Block
+
+**URL** : `/debug/pprof/block`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Mutex
+
+**URL** : `/debug/pprof/mutex`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Cmdline
+
+**URL** : `/debug/pprof/cmdline`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Profile
+
+**URL** : `/debug/pprof/profile`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Symbol
+
+**URL** : `/debug/pprof/symbol`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
+
+## Trace		
+
+**URL** : `/debug/pprof/trace`
+
+**Method** : `GET`
+
+### Success response
+
+**Code** : `200 OK`
+
+**Content examples**
+
+```proto
+// binary data from proto
+```
diff --git a/static/img/application-state.png b/versioned_docs/version-0.9.0/assets/application-state.png
similarity index 100%
rename from static/img/application-state.png
rename to versioned_docs/version-0.9.0/assets/application-state.png
diff --git a/static/img/architecture.png b/versioned_docs/version-0.9.0/assets/architecture.png
similarity index 100%
rename from static/img/architecture.png
rename to versioned_docs/version-0.9.0/assets/architecture.png
diff --git a/static/img/cpu_profile.jpg b/versioned_docs/version-0.9.0/assets/cpu_profile.jpg
similarity index 100%
rename from static/img/cpu_profile.jpg
rename to versioned_docs/version-0.9.0/assets/cpu_profile.jpg
diff --git a/static/img/dashboard_secret.png b/versioned_docs/version-0.9.0/assets/dashboard_secret.png
similarity index 100%
rename from static/img/dashboard_secret.png
rename to versioned_docs/version-0.9.0/assets/dashboard_secret.png
diff --git a/static/img/dashboard_token_select.png b/versioned_docs/version-0.9.0/assets/dashboard_token_select.png
similarity index 100%
rename from static/img/dashboard_token_select.png
rename to versioned_docs/version-0.9.0/assets/dashboard_token_select.png
diff --git a/static/img/docker-dektop-minikube.png b/versioned_docs/version-0.9.0/assets/docker-dektop-minikube.png
similarity index 100%
rename from static/img/docker-dektop-minikube.png
rename to versioned_docs/version-0.9.0/assets/docker-dektop-minikube.png
diff --git a/static/img/docker-desktop.png b/versioned_docs/version-0.9.0/assets/docker-desktop.png
similarity index 100%
rename from static/img/docker-desktop.png
rename to versioned_docs/version-0.9.0/assets/docker-desktop.png
diff --git a/static/img/fifo-state-example.png b/versioned_docs/version-0.9.0/assets/fifo-state-example.png
similarity index 100%
rename from static/img/fifo-state-example.png
rename to versioned_docs/version-0.9.0/assets/fifo-state-example.png
diff --git a/static/img/goland_debug.jpg b/versioned_docs/version-0.9.0/assets/goland_debug.jpg
similarity index 100%
rename from static/img/goland_debug.jpg
rename to versioned_docs/version-0.9.0/assets/goland_debug.jpg
diff --git a/static/img/node-bin-packing.png b/versioned_docs/version-0.9.0/assets/node-bin-packing.png
similarity index 100%
rename from static/img/node-bin-packing.png
rename to versioned_docs/version-0.9.0/assets/node-bin-packing.png
diff --git a/static/img/node-fair.png b/versioned_docs/version-0.9.0/assets/node-fair.png
similarity index 100%
rename from static/img/node-fair.png
rename to versioned_docs/version-0.9.0/assets/node-fair.png
diff --git a/static/img/object-state.png b/versioned_docs/version-0.9.0/assets/object-state.png
similarity index 100%
rename from static/img/object-state.png
rename to versioned_docs/version-0.9.0/assets/object-state.png
diff --git a/static/img/pluggable-app-mgmt.jpg b/versioned_docs/version-0.9.0/assets/pluggable-app-mgmt.jpg
similarity index 100%
rename from static/img/pluggable-app-mgmt.jpg
rename to versioned_docs/version-0.9.0/assets/pluggable-app-mgmt.jpg
diff --git a/static/img/queue-fairness.png b/versioned_docs/version-0.9.0/assets/queue-fairness.png
similarity index 100%
rename from static/img/queue-fairness.png
rename to versioned_docs/version-0.9.0/assets/queue-fairness.png
diff --git a/static/img/resilience-node-recovery.jpg b/versioned_docs/version-0.9.0/assets/resilience-node-recovery.jpg
similarity index 100%
rename from static/img/resilience-node-recovery.jpg
rename to versioned_docs/version-0.9.0/assets/resilience-node-recovery.jpg
diff --git a/static/img/resilience-workflow.jpg b/versioned_docs/version-0.9.0/assets/resilience-workflow.jpg
similarity index 100%
rename from static/img/resilience-workflow.jpg
rename to versioned_docs/version-0.9.0/assets/resilience-workflow.jpg
diff --git a/static/img/spark-jobs-on-ui.png b/versioned_docs/version-0.9.0/assets/spark-jobs-on-ui.png
similarity index 100%
rename from static/img/spark-jobs-on-ui.png
rename to versioned_docs/version-0.9.0/assets/spark-jobs-on-ui.png
diff --git a/static/img/spark-pods.png b/versioned_docs/version-0.9.0/assets/spark-pods.png
similarity index 100%
rename from static/img/spark-pods.png
rename to versioned_docs/version-0.9.0/assets/spark-pods.png
diff --git a/static/img/throughput.png b/versioned_docs/version-0.9.0/assets/throughput.png
similarity index 100%
rename from static/img/throughput.png
rename to versioned_docs/version-0.9.0/assets/throughput.png
diff --git a/static/img/yk-ui-screenshots.gif b/versioned_docs/version-0.9.0/assets/yk-ui-screenshots.gif
similarity index 100%
rename from static/img/yk-ui-screenshots.gif
rename to versioned_docs/version-0.9.0/assets/yk-ui-screenshots.gif
diff --git a/versioned_docs/version-0.9.0/design/architecture.md b/versioned_docs/version-0.9.0/design/architecture.md
new file mode 100644
index 0000000..ca5c510
--- /dev/null
+++ b/versioned_docs/version-0.9.0/design/architecture.md
@@ -0,0 +1,62 @@
+---
+id: architecture
+title: Architecture
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+Apache YuniKorn (Incubating) is a light-weight, universal resource scheduler for container orchestrator systems.
+It is created to achieve fine-grained resource sharing for various workloads efficiently on a large scale, multi-tenant,
+and cloud-native environment. YuniKorn brings a unified, cross-platform, scheduling experience for mixed workloads that
+consist of stateless batch workloads and stateful services.
+
+YuniKorn now supports K8s and can be deployed as a custom K8s scheduler. YuniKorn's architecture design also allows
+adding different shim layers and adapting to different ResourceManager implementations, including Apache Hadoop YARN
+or any other system.
+
+## Architecture
+
+The following chart illustrates the high-level architecture of YuniKorn.
+
+<img src={require('./../assets/architecture.png').default} />
+
+## Components
+
+### Scheduler interface
+
+[Scheduler interface](https://github.com/apache/incubator-yunikorn-scheduler-interface) is an abstraction layer
+that resource management platforms (like YARN/K8s) speak with, via APIs such as GRPC or programming language bindings.
+
+### Scheduler core
+
+The scheduler core encapsulates all scheduling algorithms. It collects resources from the underlying resource management
+platforms (like YARN/K8s) and is responsible for container allocation requests. It decides the best spot for each
+request and then sends the resulting allocations back to the resource management platform.
+The scheduler core is agnostic about the underlying platforms; all communication goes through the [scheduler interface](https://github.com/apache/incubator-yunikorn-scheduler-interface).
+Please read more about the design of the scheduler core [here](scheduler_core_design.md).
+
+### Kubernetes shim
+
+The YuniKorn Kubernetes shim is responsible for talking to Kubernetes. It translates the Kubernetes cluster resources
+and resource requests via the scheduler interface and sends them to the scheduler core.
+When a scheduling decision is made, it is responsible for binding the pod to the specific node. All the communication
+between the shim and the scheduler core goes through the [scheduler interface](https://github.com/apache/incubator-yunikorn-scheduler-interface).
+Please read more about the design of the Kubernetes shim [here](k8shim.md).
+
diff --git a/versioned_docs/version-0.9.0/design/cross_queue_preemption.md b/versioned_docs/version-0.9.0/design/cross_queue_preemption.md
new file mode 100644
index 0000000..51c8033
--- /dev/null
+++ b/versioned_docs/version-0.9.0/design/cross_queue_preemption.md
@@ -0,0 +1,126 @@
+---
+id: cross_queue_preemption
+title: Cross Queue Preemption
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+## Problems:
+
+These are the lessons we learned from YARN Scheduler preemption.
+
+**The main drawbacks are:**
+
+- Preemption is a shotgun instead of a sniper: when a preemption decision is made, nobody knows if the preempted resources will go to the demanding queue/app/user or not.
+- Preemption logic and allocation logic are different, so we have to implement (and mimic) what we have already done in the scheduler allocation logic.
+
+**The main benefits are:**
+
+- Preemption is fast (thanks to the shotgun): reclaiming thousands of containers only takes ~1 sec.
+- We understand how painful it is to handle DRF and multiple preemption policies (inter/intra-queue, shotgun/surgical preemption, etc.), and we have developed some good logic
+to ensure better modularization and pluggability.
+
+## Answer some questions for design/implementation choices
+
+**1\. Do we really want preemption-delay? (Or do we just want to control the pace?)**
+
+In CS, we have a preemption-delay, which selects victims from the preemption candidates and waits for a certain time before killing them.
+
+The purposes of the preemption delay are: a. give a heads-up to apps so
+they can prepare for the upcoming disruption (unfortunately no app does anything with these heads-ups, at least as far as we know); b. control the preemption pace.
+
+In practice, we found it causes a lot of issues; for example when the
+cluster state keeps changing, it is very hard to ensure accurate preemption.
+
+**Proposal:**
+
+Remove the preemption-delay but keep the logic that controls the preemption pace (such as `yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round`), and do allocation together with preemption.
+This doesn't mean containers will be stopped immediately after preemption is issued. Instead, the RM can control the delay between signalling a container and killing a container, such as the graceful
+termination of pods in K8s: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+
+**2\. Do we want to do preemption in every scheduling cycle, or can we do it periodically?**
+
+In CS, the preemption logic runs periodically, like every 1 sec or 3 sec.
+
+Preemption involves some heavy computation, like calculating the shares of queues/apps, and when doing accurate preemption we may need to scan nodes for preemption candidates.
+Considering this, we propose to run preemption periodically. It is important to note that we need to reuse as much code as possible for
+allocation-inside-preemption, otherwise there will be too much duplicated logic that is very hard to maintain in the future.
+
+**3\. Preemption cost and function**
+
+We found it helpful to attach a cost to preemption, based on things such as container lifetime, priority and type of container. It could be a cost function (which returns a numeric value) or it
+could be a comparator (which compares two allocations for a preemption ask).
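+
+As an illustration only, a comparator of this kind could look like the hedged Go sketch below; the `allocation` type and its fields are hypothetical and do not refer to any existing YuniKorn struct.
+
+```go
+package main
+
+import (
+    "fmt"
+    "time"
+)
+
+// allocation is a hypothetical view of a running container that a
+// preemption policy could reason about.
+type allocation struct {
+    priority      int
+    opportunistic bool
+    startTime     time.Time
+}
+
+// cheaperToPreempt returns true when preempting a is cheaper than preempting b:
+// prefer opportunistic containers, then lower priority, then shorter lifetime.
+func cheaperToPreempt(a, b allocation, now time.Time) bool {
+    if a.opportunistic != b.opportunistic {
+        return a.opportunistic
+    }
+    if a.priority != b.priority {
+        return a.priority < b.priority
+    }
+    return now.Sub(a.startTime) < now.Sub(b.startTime)
+}
+
+func main() {
+    now := time.Now()
+    longLived := allocation{priority: 1, opportunistic: false, startTime: now.Add(-2 * time.Hour)}
+    shortLived := allocation{priority: 1, opportunistic: false, startTime: now.Add(-5 * time.Minute)}
+    fmt.Println("preempt shortLived first:", cheaperToPreempt(shortLived, longLived, now))
+}
+```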
+
+## Pseudo code
+
+Logic of allocation (invoked every allocation cycle)
+
+```
+input:
+  - nAlloc, allocate N allocations for this allocation cycle.
+
+for partition: 
+  askCandidates := findAskCandidates(nAlloc, preemption=false)
+  
+  allocated, failed_to_allocated := tryAllocate(askCandidates);
+  
+  send-allocated-to-cache-to-commit;
+  
+  update-missed-opportunity (allocated, failed_to_allocated);
+  
+  nAlloc -= len(allocated)   
+```
+
+Logic of preemption (invoked every preemption cycle)
+
+```
+// It has to be done for every preemption-policy because calculation is different.
+for preemption-policy: 
+  preempt_results := policy.preempt()
+  for preempt_results: 
+     send-preempt-result-to-cache-to-commit;
+     updated-missed-opportunity (allocated)
+```
+
+Inside preemption policy
+
+```
+inter-queue-preempt-policy:
+  calculate-preemption-quotas;
+  
+  for partitions:
+    total_preempted := resource(0);
+    
+    while total_preempted < partition-limited:
+      // queues will be sorted by allocating - preempting
+      // And ignore any key in preemption_mask
+      askCandidates := findAskCandidates(N, preemption=true)
+      
+      preempt_results := tryAllocate(askCandidates, preemption=true);
+      
+      total_preempted += sigma(preempt_result.allocResource)
+      
+      send-allocated-to-cache-to-commit;
+      
+      update-missed-opportunity (allocated, failed_to_allocated);
+      
+      update-preemption-mask(askCandidates.allocKeys - preempt_results.allocKeys)
+```
\ No newline at end of file
diff --git a/versioned_docs/version-0.9.0/design/k8shim.md b/versioned_docs/version-0.9.0/design/k8shim.md
new file mode 100644
index 0000000..39ca4d1
--- /dev/null
+++ b/versioned_docs/version-0.9.0/design/k8shim.md
@@ -0,0 +1,83 @@
+---
+id: k8shim
+title: Kubernetes Shim Design
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+Github repo: https://github.com/apache/incubator-yunikorn-k8shim
+
+Please read the [architecture](architecture.md) doc before reading this one; you need to understand
+the 3-layer design of YuniKorn in order to understand what the Kubernetes shim is.
+
+## The Kubernetes shim
+
+The YuniKorn Kubernetes shim is responsible for talking to Kubernetes. It translates the Kubernetes
+cluster resources and resource requests via the scheduler interface and sends them to the scheduler core.
+When a scheduling decision is made, it is responsible for binding the pod to the specific node. All the communication
+between the shim and the scheduler core goes through the scheduler-interface.
+
+## The admission controller
+
+The admission controller runs in a separate pod, it runs a
+[mutation webhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook)
+and a [validation webhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook), where:
+
+1. The `mutation webhook` mutates pod spec by:
+   - adding `schedulerName: yunikorn`
+     - by explicitly specifying the scheduler name, the pod will be scheduled by YuniKorn scheduler
+   - adding `applicationId` label
+     - when a label `applicationId` exists, reuse the given applicationId
+     - when a label `spark-app-selector` exists, reuse the given spark app ID
+     - otherwise, assign a generated application ID for this pod, using the convention `yunikorn-<namespace>-autogen`. This is unique per namespace.
+   - adding `queue` label
+     - when a label `queue` exists, reuse the given queue name. Note that if a placement rule is enabled, the value set in the label is ignored
+     - otherwise, adds `queue: root.default`
+2. The `validation webhook` validates the configuration set in the configmap
+   - this is used to prevent writing malformed configuration into the configmap
+   - the validation webhook calls scheduler [validation REST API](api/scheduler.md#configuration-validation) to validate configmap updates
+
+### Admission controller deployment
+
+Currently, the deployment of the admission-controller is done as a `post-start` hook in the scheduler deployment; similarly, the
+uninstall is done as a `pre-stop` hook. See the related code [here](https://github.com/apache/incubator-yunikorn-release/blob/56e580af24ed3433e7d73d9ea556b19ad7b74337/helm-charts/yunikorn/templates/deployment.yaml#L80-L85).
+During installation, the admission controller is expected to always be co-located with the scheduler pod. This is done
+by adding pod affinity to the admission-controller pod, like:
+
+```yaml
+podAffinity:
+  requiredDuringSchedulingIgnoredDuringExecution:
+    - labelSelector:
+        matchExpressions:
+          - key: component
+            operator: In
+            values:
+              - yunikorn-scheduler
+      topologyKey: "kubernetes.io/hostname"
+```
+
+It also tolerates all taints, in case the scheduler pod has some tolerations set.
+
+```yaml
+tolerations:
+- operator: "Exists"
+```
+
+
diff --git a/versioned_docs/version-0.9.0/design/namespace_resource_quota.md b/versioned_docs/version-0.9.0/design/namespace_resource_quota.md
new file mode 100644
index 0000000..90830b6
--- /dev/null
+++ b/versioned_docs/version-0.9.0/design/namespace_resource_quota.md
@@ -0,0 +1,183 @@
+---
+id: namespace_resource_quota
+title: Namespace Resource Quota
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+In K8s, users can set up namespaces with [resource quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/) to limit aggregated resource consumption in a namespace. The validation of namespace resource quotas is handled in the api-server directly, therefore YuniKorn simply honors the quotas like the default scheduler does.
+
+## Best practice
+
+It is not mandatory to set up YuniKorn queues with respect to namespaces.
+However, in practice, it makes more sense to do so.
+A namespace is often used to cap resource consumption per user group/team,
+and a YuniKorn queue is also meant to divide cluster resources into multiple groups.
+Let's go through an example.
+
+### 1. Setup namespace
+
+Namespace: `advertisement`:
+```
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: advertisement
+spec:
+  hard:
+    requests.cpu: "200m"
+    requests.memory: 2000Mi
+    limits.cpu: "200m"
+    limits.memory: 4000Mi
+```
+Create the namespace
+```
+kubectl create namespace advertisement
+kubectl create -f ./advertisement.yaml --namespace=advertisement
+kubectl get quota --namespace=advertisement
+kubectl describe quota advertisement --namespace=advertisement
+
+// output
+Name:            advertisement
+Namespace:       advertisement
+Resource         Used  Hard
+--------         ----  ----
+limits.cpu       0     200m
+limits.memory    0     4000Mi
+requests.cpu     0     200m
+requests.memory  0     2000Mi
+```
+
+### 2. Setup YuniKorn queues
+
+Queue: `advertisement`:
+```
+name: advertisement
+resources:
+  guaranteed:
+    vcore: 100
+    memory: 1000
+  max:
+    vcore: 200
+    memory: 2000
+```
+
+Ensure `QueueMaxResource <= NamespaceResourceQuotaRequests`.
+
+### 3. Mapping applications to queues & namespace
+
+In a pod spec
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  namespace: advertisement
+  labels:
+    app: sleep
+    applicationId: "application_2019_01_22_00001"
+    queue: "root.advertisement"
+  name: task0
+spec:
+  schedulerName: yunikorn
+  containers:
+    - name: sleep-5s
+      image: "alpine:latest"
+      command: ["/bin/ash", "-ec", "while :; do echo '.'; sleep 5 ; done"]
+      resources:
+        requests:
+          cpu: "50m"
+          memory: "800M"
+        limits:
+          cpu: "100m"
+          memory: "1000M"
+```
+
+Check Quota
+
+```
+kubectl describe quota advertisement --namespace=advertisement
+
+Name:            advertisement
+Namespace:       advertisement
+Resource         Used  Hard
+--------         ----  ----
+limits.cpu       100m  200m
+limits.memory    1G    4000Mi
+requests.cpu     50m   200m
+requests.memory  800M  2000Mi
+```
+
+Now submit another application:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  namespace: advertisement
+  labels:
+    app: sleep
+    applicationId: "application_2019_01_22_00002"
+    queue: "root.advertisement"
+  name: task1
+spec:
+  schedulerName: yunikorn
+  containers:
+    - name: sleep-5s
+      image: "alpine:latest"
+      command: ["/bin/ash", "-ec", "while :; do echo '.'; sleep 5 ; done"]
+      resources:
+        requests:
+          cpu: "200m"
+          memory: "800M"
+        limits:
+          cpu: "200m"
+          memory: "1000M"
+```
+
+The pod cannot be submitted to the api-server, because the requested cpu `200m` plus the used cpu `100m` equals `300m`, which exceeds the resource quota.
+
+```
+kubectl create -f pod_ns_adv_task1.yaml
+Error from server (Forbidden): error when creating "pod_ns_adv_task1.yaml": pods "task1" is forbidden: exceeded quota: advertisement, requested: limits.cpu=200m,requests.cpu=200m, used: limits.cpu=100m,requests.cpu=50m, limited: limits.cpu=200m,requests.cpu=200m
+```
+
+## Future Work
+
+For compatibility, we should respect namespaces and resource quotas.
+Resource quotas overlap with queue configuration in many ways;
+for example, the `requests` quota is just like a queue's max resource. However,
+there are still a few things a resource quota can do that a queue cannot, such as:
+
+1. Resource `limits`. The aggregated resource from all pods in a namespace cannot exceed this limit.
+2. Storage Resource Quota, e.g storage size, PVC number, etc.
+3. Object Count Quotas, e.g count of PVCs, services, configmaps, etc.
+4. Resource Quota can map to priority class.
+
+We can probably build something similar to cover (3) in this list,
+but it would be hard to completely support all these cases.
+
+Currently, mapping applications to a queue as well as a corresponding namespace is overly complex.
+Some future improvements might be:
+
+1. Automatically detect namespaces in the k8s-shim and map them to queues. Behind the scenes, we automatically generate the queue configuration based on the namespace definition. Generated queues are attached under the root queue.
+2. When a namespace is added/updated/removed, similar to (1), we automatically update the queues.
+3. Users can add more configuration to the queues, e.g. add queue ACLs or child queues to the generated queues.
+4. Applications submitted to namespaces are transparently submitted to the corresponding queues.
\ No newline at end of file
diff --git a/docs/design/pluggable_app_management.md b/versioned_docs/version-0.9.0/design/pluggable_app_management.md
similarity index 98%
copy from docs/design/pluggable_app_management.md
copy to versioned_docs/version-0.9.0/design/pluggable_app_management.md
index 681e7bb..d297ada 100644
--- a/docs/design/pluggable_app_management.md
+++ b/versioned_docs/version-0.9.0/design/pluggable_app_management.md
@@ -58,7 +58,7 @@ The key issue here is we need a app-management interface, that can be easily ext
 It needs to be decoupled with existing scheduling logic. For each operator, we create a service to manage this type app's lifecycle,
 and communicate with the scheduling cache independently. The high-level design looks like below:
 
-![Pluggable App Management](/img/pluggable-app-mgmt.jpg)
+![Pluggable App Management](./../assets/pluggable-app-mgmt.jpg)
 
 Where
 - `AppManagementService` is a composite set of services that can be managed together.
diff --git a/versioned_docs/version-0.9.0/design/predicates.md b/versioned_docs/version-0.9.0/design/predicates.md
new file mode 100644
index 0000000..f750cbd
--- /dev/null
+++ b/versioned_docs/version-0.9.0/design/predicates.md
@@ -0,0 +1,80 @@
+---
+id: predicates
+title: Support K8s Predicates
+---
+
+<!--
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*      http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+-->
+
+## Design
+
+Predicates are a set of pre-registered functions in K8s, the scheduler invokes these functions to check if a pod
+is eligible to be allocated onto a node. Common predicates are: node-selector, pod affinity/anti-affinity etc. To support
+these predicates in YuniKorn, we don't intend to re-implement everything on our own, but to re-use the core predicates
+code as much as possible.
+
+YuniKorn-core is agnostic about the underlying RMs, so the predicate functions are implemented in the K8s-shim as a `SchedulerPlugin`.
+A SchedulerPlugin is a way to plug in/extend scheduler capabilities. A shim can implement such a plugin and register itself with
+yunikorn-core, so the plugged-in functions can be invoked in the scheduler core. Find all supported plugins in
+[types](https://github.com/apache/incubator-yunikorn-core/blob/master/pkg/plugins/types.go).
+
+## Workflow
+
+First, the RM needs to register itself with yunikorn-core and advertise which scheduler plugin interfaces it supports.
+E.g. an RM could implement the `PredicatePlugin` interface and register itself with yunikorn-core. yunikorn-core will then
+call the PredicatePlugin API to run predicates before making allocation decisions.
+
+
+The following workflow demonstrates what the allocation flow looks like when predicates are involved.
+
+```
+pending pods: A, B
+shim sends requests to core, including A, B
+core starts to schedule A, B
+  partition -> queue -> app -> request
+    schedule A (1)
+      run predicates (3)
+        generate predicates metadata (4)
+        run predicate functions one by one with the metadata
+        success
+        proposal: A->N
+    schedule B (2)
+      run predicates (calling shim API)
+        generate predicates metadata
+        run predicate functions one by one with the metadata
+        success
+        proposal: B->N
+commit the allocation proposal for A and notify k8s-shim
+commit the allocation proposal for B and notify k8s-shim
+shim binds pod A to N
+shim binds pod B to N
+```
+
+(1) and (2) are running in parallel.
+
+(3) yunikorn-core calls a `schedulerPlugin` API to run predicates, this API is implemented on k8s-shim side.
+
+(4) The K8s-shim generates metadata based on the current scheduler cache; the metadata includes some intermediate state about nodes and pods.
+
+## Predicates White-list
+
+Intentionally, we only support a white-list of predicates, mainly for 2 reasons:
+* Predicate functions are time-consuming and have a negative impact on scheduler performance. Supporting only the necessary predicates minimizes that impact. This will be configurable via CLI options;
+* The implementation depends heavily on the K8s default scheduler code; though we reused some unit tests, the coverage is still a problem. We'll continue to improve the coverage when adding new predicates.
+
+The white-list is currently defined in [DefaultSchedulerPolicy](https://github.com/apache/incubator-yunikorn-k8shim/blob/master/pkg/plugin/predicates/predictor.go).
diff --git a/docs/design/resilience.md b/versioned_docs/version-0.9.0/design/resilience.md
similarity index 98%
copy from docs/design/resilience.md
copy to versioned_docs/version-0.9.0/design/resilience.md
index 46aac13..c68a708 100644
--- a/docs/design/resilience.md
+++ b/versioned_docs/version-0.9.0/design/resilience.md
@@ -60,7 +60,7 @@ New -----------> Registered -----------> Recovering ----------> Running
 
 Following chart illustrate how yunikorn-core and shim works together on recovery.
 
-![Workflow](/img/resilience-workflow.jpg)
+![Workflow](./../assets/resilience-workflow.jpg)
 
 Restart (with recovery) process
 - yunikorn-shim registers itself with yunikorn-core
@@ -89,7 +89,7 @@ In the shim layer, it maintains states for each node and pods running on this no
 all nodes initially are considered as under `recovering`. Only when all pods running on this node are fully recovered,
 the node can be considered as `recovered`.
 
-![node-recovery](/img/resilience-node-recovery.jpg)
+![node-recovery](./../assets/resilience-node-recovery.jpg)
 
 Like demonstrated on upon diagram,
 
diff --git a/docs/design/scheduler_configuration.md b/versioned_docs/version-0.9.0/design/scheduler_configuration.md
similarity index 97%
copy from docs/design/scheduler_configuration.md
copy to versioned_docs/version-0.9.0/design/scheduler_configuration.md
index 5010da6..4f2c7cf 100644
--- a/docs/design/scheduler_configuration.md
+++ b/versioned_docs/version-0.9.0/design/scheduler_configuration.md
@@ -30,7 +30,7 @@ The scheduler configuration is mainly static. There is no need to change a web s
 
 From a separation of duty we can allow an operator that manages the cluster to make changes to the scheduler queues. You would not want to allow that administrator to change the scheduler configuration itself.
 
-Separated from the core scheduler configuration we have one or more shim configurations. We currently can not anticipate the deployment model of the scheduler and its shims. A shim, like the k8s-shim, might run in the same container or node but there is no guarantee it will. We also do not know the number of shims that will be used with one core scheduler. There is also still the possibility to have multiple instances of the same shim with one core scheduler.
+Separated from the core scheduler configuration we have one or more shim configurations. We currently cannot anticipate the deployment model of the scheduler and its shims. A shim, like the k8s-shim, might run in the same container or node but there is no guarantee it will. We also do not know the number of shims that will be used with one core scheduler. There is also still the possibility to have multiple instances of the same shim with one core scheduler.
 
 Shim configuration must be independent of the core scheduler configuration.
 ## Scheduler Configuration
diff --git a/versioned_docs/version-0.9.0/design/scheduler_core_design.md b/versioned_docs/version-0.9.0/design/scheduler_core_design.md
new file mode 100644
index 0000000..f25fa98
--- /dev/null
+++ b/versioned_docs/version-0.9.0/design/scheduler_core_design.md
@@ -0,0 +1,395 @@
+---
+id: scheduler_core_design
+title: Scheduler Core Design
+---
+
+<!--
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+
+Github repo: https://github.com/apache/incubator-yunikorn-core/
+
+The scheduler core encapsulates all scheduling algorithms. It collects resources from the underlying resource management
+platforms (like YARN/K8s) and is responsible for container allocation requests. It decides the best spot for each
+request and then sends the resulting allocations back to the resource management platform.
+The scheduler core is agnostic about the underlying platforms; all communication goes through the [scheduler interface](https://github.com/apache/incubator-yunikorn-scheduler-interface).
+
+## Components:
+
+```
+
+                     +---------------+  +--------------+
+                     |K8s Shim       |  |YARN Shim     |
+                     +---------------+  +--------------+
+
+                                +--------------+   +------------+
+                Scheduler-      | GRPC Protocol|   |Go API      |
+                Interface:      +--------------+   +------------+
+
++---------------------------------------------------------------------------+
+                     +--------------------+
+                     |Scheduler API Server|
+ +-------------+     +---------+----------+
+ |AdminService |               |
+ +-------------+               |Write Ops                    +----------------+
+ +-------------+               V                            ++Scheduler       |
+ |Configurator |      +-------------------+  Allocate       ||   And          |
+ +-------------+      |Cache Event Handler+<-----------------|                |
+         +----------> +-------------------+  Preempt        ++Preemptor       |
+          Update Cfg   Handled by policies                   +----------------+
+                               +  (Stateless)
+                        +------v--------+
+                        |Scheduler Cache|
+                        +---------------+
+                +---------------------------------------------+
+                |--------+ +------+ +----------+ +----------+ |
+                ||Node   | |Queue | |Allocation| |Requests  | |
+                |--------+ +------+ +----------+ +----------+ |
+                +---------------------------------------------+
+```
+
+### Scheduler API Server (RMProxy)
+
+Responsible for the communication between the RM and the scheduler. It implements the scheduler-interface GRPC protocol,
+or just plain APIs (for intra-process communication without Serde).
+
+### Scheduler Cache
+
+Caches all data related to the scheduler state, such as the used resources of each queue, nodes, allocations,
+the relationship between allocations and nodes, etc. It should not include in-flight data used for resource allocation,
+for example to-be-preempted allocation candidates, fair share resources of queues, etc.
+
+### Scheduler Cache Event Handler
+
+Handles all events which need to update the scheduler's internal state, so all write operations are carefully handled.
+
+### Admin Service
+
+Handles requests from the admin; it can also load configurations from storage and update scheduler policies.
+
+### Scheduler and Preemptor
+
+Handles the scheduler's internal state (which does not belong to the scheduler cache), such as internal reservations.
+The scheduler and preemptor work together to make scheduling or preemption decisions. All allocate/preempt requests
+are handled by the event handler.
+
+## Scheduler's responsibility
+
+- According to the resource usage between queues, sort queues and applications, and figure out the order of application allocation. (This will be used by preemption as well.)
+- It is possible that we cannot satisfy some of the allocation requests; we need to skip them and find the next request.
+- It is possible that some allocation requests cannot be satisfied because of resource fragmentation. We need to reserve room for such requests.
+- Different nodes may belong to different disjoint partitions; we can make independent scheduler runs.
+- Be able to configure and change ordering policies for apps and queues.
+- Applications can choose their own way to manage the sorting of nodes.
+
+## Preemption
+
+- It is important to know "who wants the resource", so we can do preemption based on allocation order.
+- When doing preemption, it is also efficient to trigger an allocation operation; we need to think about how to do that.
+- Preemption needs to take care of queue resource balancing.
+
+## Communication between Shim and Core 
+
+YuniKorn-Shim (like https://github.com/apache/incubator-yunikorn-k8shim) communicates with core by
+using scheduler-interface (https://github.com/apache/incubator-yunikorn-scheduler-interface).
+The scheduler interface offers a Go API and GRPC. Currently, yunikorn-k8shim uses the Go API to communicate with yunikorn-core
+to avoid the extra overhead introduced by GRPC.
+
+**A shim (like the K8shim) first needs to register with the core:**
+
+```go
+func (m *RMProxy) RegisterResourceManager(request *si.RegisterResourceManagerRequest, callback api.ResourceManagerCallback) (*si.RegisterResourceManagerResponse, error)
+```
+
+This indicates the ResourceManager's name and a callback function for update responses. The core is designed to be able to schedule for multiple clusters (such as multiple K8s clusters) with just one core instance.
+
+**The shim interacts with the core by frequently invoking RMProxy's Update API, which sends new allocation requests, allocations to kill, node updates, etc.**
+
+```go
+func (m *RMProxy) Update(request *si.UpdateRequest) error
+```
+
+The response to an update (such as a newly allocated container) is received via the registered callback.
+
+## Configurations & Semantics
+
+Example of configuration:
+
+- A partition is a namespace.
+- The same queues can exist under different partitions, but they are enforced to have the same hierarchy.
+
+    Good:
+
+    ```
+     partition=x    partition=y
+         a           a
+       /   \        / \
+      b     c      b   c
+    ```
+
+    Good (c in partition y acl=""):
+
+    ```
+     partition=x    partition=y
+         a           a
+       /   \        /
+      b     c      b
+    ```
+
+    Bad (c in different hierarchy)
+
+    ```
+     partition=x    partition=y
+         a           a
+       /   \        /  \
+      b     c      b    d
+                  /
+                 c
+    ```
+
+    Bad (Duplicated c)
+
+    ```
+     partition=x
+         a
+       /   \
+      b     c
+     /
+    c
+
+    ```
+
+- Different hierarchies can be added
+
+    ```scheduler-conf.yaml
+    partitions:
+      - name:  default
+        queues:
+            root:
+              configs:
+                acls:
+              childrens:
+                - a
+                - b
+                - c
+                - ...
+            a:
+              configs:
+                acls:
+                capacity: (capacity is not allowed to set for root)
+                max-capacity: ...
+          mapping-policies:
+            ...
+      - name: partition_a:
+        queues:
+            root:...
+    ```
+
+## How the scheduler does allocation
+
+The scheduler runs a separate goroutine that looks at asks and available resources and does resource allocation. Here's the allocation logic in pseudo code:
+
+The entry point of scheduler allocation is `scheduler.go: func (s *Scheduler) schedule()`.
+
+```
+# First of all, YuniKorn has a partition concept: a logical resource pool can consist
+# of one or multiple physically disjoint partitions. It is similar to YARN's node
+# partition concept.
+
+for partition : partitions:
+  # YuniKorn can reserve allocations for picky asks (such as large requests, etc.)
+  # Before doing regular allocation, YuniKorn looks at reservedAllocations first.
+  for reservedAllocation : partition.reservedAllocations: 
+     reservedAllocation.tryAllocate(..)
+  
+  # After trying all reserved allocations, YuniKorn will go to regular allocation
+  partition.tryAllocate(..)
+  
+  # If any allocation is created, the scheduler will create an AllocationProposal
+  # and send it to the Cache to "commit" the AllocationProposal 
+```
+
+**Allocation by the hierarchy of queues**
+
+Inside `partition.tryAllocate` 
+
+It recursively traverses from the root queue down to the lower levels; for each level, the logic is inside `pkg/scheduler/scheduling_queue.go func (sq *SchedulingQueue) tryAllocate`
+
+Remember that YuniKorn natively supports hierarchical queues. For a ParentQueue (which has sub queues under it), it uses the queue's own sorting policy to sort the sub queues and tries to allocate from the most preferred queue to the least preferred queue. 
+
+For a LeafQueue (which has applications inside it), it uses the queue's own sorting policy to sort the applications belonging to the queue and allocates based on the sorted order. 
+
+(All sorting policies can be configured differently at each level.) 
+
+**Allocation by application**
+
+When it goes down to the application (see `scheduler_application.go: func (sa *SchedulingApplication) tryAllocate`), it first sorts the pending resource requests belonging to the application (based on the requests' priority). Then, based on the selected request and the configured node-sorting policy, it sorts the nodes belonging to the partition and tries to allocate resources on the sorted nodes. 
+
+When the application is trying to allocate resources on nodes, it invokes the PredicatePlugin to make sure the shim can confirm the node is suitable. (For example, the K8shim runs predicate checks as an allocation pre-check.)
+
+**Allocation completed by scheduler** 
+
+Once the allocation is done, the scheduler will create an AllocationProposal and send it to the Cache for further checks; we will cover the details in the upcoming section.
+
+## Flow of events
+
+As mentioned before, all communication between components like RMProxy/Cache/Scheduler is done by using async event handlers. 
+
+RMProxy/Cache/Scheduler each include local event queues and event handlers. RMProxy and Scheduler have only one queue (for example: `pkg/scheduler/scheduler.go: handleSchedulerEvent`), and Cache has two queues (one for events from RMProxy and one for events from Scheduler, which is designed for better performance). 
+
+We will talk about how events flow between components: 
+
+**Events for ResourceManager registration and updates:**
+
+```
+Update from ResourceManager -> RMProxy -> RMUpdateRequestEvent Send to Cache
+New ResourceManager registration -> RMProxy -> RegisterRMEvent Send to Cache
+```
+
+**Cache Handles RM Updates** 
+
+There are many fields inside the RM update event (`RMUpdateRequestEvent`); among them, we have the following categories: 
+
+```
+1) Application-related updates
+2) New allocation asks and releases
+3) Node (such as kubelet) updates (new node, remove node, node resource change, etc.)
+```
+
+More details can be found at: 
+
+```
+func (m *ClusterInfo) processRMUpdateEvent(event *cacheevent.RMUpdateRequestEvent)
+
+inside cluster_info.go
+```
+
+**Cache send RM updates to Scheduler**
+
+In most cases, the Cache propagates updates from the RM to the scheduler directly (including applications, nodes, asks, etc.). It is possible that some updates from the RM are not valid (such as adding an application to a non-existent queue); for such cases, the Cache can send an event back to RMProxy and notify the ResourceManager. (See `RMApplicationUpdateEvent.RejectedApplications`)
+
+**Cache handles scheduler config** 
+
+The Cache also handles the scheduler's config changes, see:
+
+```go
+func (m *ClusterInfo) processRMConfigUpdateEvent(event *commonevents.ConfigUpdateRMEvent)
+```
+
+Similar to other RM updates, it propagates the news to the scheduler.
+
+**Scheduler does allocation**
+
+Once an AllocationProposal is created by the scheduler, the scheduler sends an `AllocationProposalBundleEvent` to the Cache to commit it. 
+
+The Cache looks at the AllocationProposal under a lock and commits these proposals. The reason for the proposal/commit split is that the Scheduler can run multi-threaded, which could cause conflicts for resource allocation. This approach is inspired by Borg/Omega/YARN global scheduling.
+
+The Cache checks more state such as queue resources and node resources (we cannot allocate more resources than a node has available). Once the check is done, the Cache updates its internal data structures and sends a confirmation to the Scheduler to update the same, and the scheduler sends the allocated Allocation to RMProxy so the shim can take further action. For example, the K8shim will `bind` an allocation (pod) to the kubelet.
+
+```
+Job Add:
+--------
+RM -> Cache -> Scheduler (Implemented)
+
+Job Remove:
+-----------
+RM -> Scheduler -> Cache (Implemented)
+Released allocations: (Same as normal release) (Implemented)
+Note: Make sure it is removed from the scheduler first to avoid new allocations being created. 
+
+Scheduling Request Add:
+-----------------------
+RM -> Cache -> Scheduler (Implemented)
+Note: Will check if requested job exists, queue exists, etc.
+When any request invalid:
+   Cache -> RM (Implemented)
+   Scheduler -> RM (Implemented)
+
+Scheduling Request remove:
+------------------------- 
+RM -> Scheduler -> Cache (Implemented)
+Note: Make sure it is removed from the scheduler first to avoid new containers being allocated
+
+Allocation remove (Preemption) 
+-----------------
+Scheduler -> Cache -> RM (TODO)
+              (confirmation)
+
+Allocation remove (RM voluntarily asks)
+---------------------------------------
+RM -> Scheduler -> Cache -> RM. (Implemented)
+                      (confirmation)
+
+Node Add: 
+---------
+RM -> Cache -> Scheduler (Implemented)
+Note: Inside Cache, update allocated resources.
+Error handling: Reject Node to RM (Implemented)
+
+Node Remove: 
+------------
+Implemented in cache side
+RM -> Scheduler -> Cache (TODO)
+
+Allocation Proposal:
+--------------------
+Scheduler -> Cache -> RM
+When rejected/accepted:
+    Cache -> Scheduler
+    
+Initial: (TODO)
+--------
+1. Admin configured partitions
+2. Cache initializes
+3. Scheduler copies configurations
+
+Relations between Entities 
+-------------------------
+1. RM includes one or multiple:
+   - Partitions 
+   - Jobs
+   - Nodes 
+   - Queues
+   
+2. One queue: 
+   - Under one partition
+   - Under one RM.
+   
+3. One job: 
+   - Under one queue (jobs with the same name can exist under different partitions)
+   - Under one partition
+
+RM registration: (TODO)
+----------------
+1. RM send registration
+2. If the RM is already registered, remove the old one, including everything belonging to that RM.
+
+RM termination (TODO) 
+--------------
+Just remove the old one.
+
+Update of queues (TODO) 
+------------------------
+Admin Service -> Cache
+
+About partition (TODO) 
+-----------------------
+Internal partitions need to be normalized; for example, the RM specifies a node with partition = xyz.
+The scheduler internally needs to normalize it to <rm-id>_xyz.
+This needs to be done by RMProxy.
+
+```
\ No newline at end of file
diff --git a/docs/design/scheduler_object_states.md b/versioned_docs/version-0.9.0/design/scheduler_object_states.md
similarity index 97%
copy from docs/design/scheduler_object_states.md
copy to versioned_docs/version-0.9.0/design/scheduler_object_states.md
index d761658..c0a9db3 100644
--- a/docs/design/scheduler_object_states.md
+++ b/versioned_docs/version-0.9.0/design/scheduler_object_states.md
@@ -69,7 +69,7 @@ The events that can trigger a state change:
 * Kill: kill an application (source: resource manager)
 
 Here is a diagram that shows the states with the event that causes the state to change:  
-![application state diagram](/img/application-state.png)
+![application state diagram](./../assets/application-state.png)
 
 ### Object State
 <!-- fix the draining to stopped transition -->
@@ -96,7 +96,7 @@ The events that can trigger a state change:
 * Remove: mark an object for removal (source: core scheduler)
 
 Here is a diagram that shows the states with the event that causes the state to change:  
-![object state diagram](/img/object-state.png)
+![object state diagram](./../assets/object-state.png)
 
 ### Node
 <!-- should start using object state -->
diff --git a/docs/design/state_aware_scheduling.md b/versioned_docs/version-0.9.0/design/state_aware_scheduling.md
similarity index 99%
copy from docs/design/state_aware_scheduling.md
copy to versioned_docs/version-0.9.0/design/state_aware_scheduling.md
index 7cc35a1..f92f93c 100644
--- a/docs/design/state_aware_scheduling.md
+++ b/versioned_docs/version-0.9.0/design/state_aware_scheduling.md
@@ -97,7 +97,7 @@ Weighing those against each other the proposal is to not make this configurable.
 ### Example run
 Using Spark applications as an example: a new application can only be scheduled if the previous application has at least one (1) executor allocated.
 
-![images](/img/fifo-state-example.png)
+![images](./../assets/fifo-state-example.png)
 
 Assume we have the following Spark apps: App1 & App2 as in the diagram above. The applications were submitted in that order: App1 first, then App2. They were both submitted to the same queue.
 
diff --git a/versioned_docs/version-0.9.0/developer_guide/build.md b/versioned_docs/version-0.9.0/developer_guide/build.md
new file mode 100644
index 0000000..1c68166
--- /dev/null
+++ b/versioned_docs/version-0.9.0/developer_guide/build.md
@@ -0,0 +1,166 @@
+---
+id: build
+title: Build and Run
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+YuniKorn always works with a container orchestrator system. Currently, a Kubernetes shim [yunikorn-k8shim](https://github.com/apache/incubator-yunikorn-k8shim)
+is provided in our repositories; you can leverage it to develop YuniKorn scheduling features and integrate with Kubernetes.
+This document describes how to set up the development environment and how to do the development.
+
+## Development Environment setup
+
+Read the [environment setup guide](developer_guide/env_setup.md) first to setup Docker and Kubernetes development environment.
+
+## Build YuniKorn for Kubernetes
+
+Prerequisite:
+- Go 1.11+
+
+You can build the scheduler for Kubernetes from [yunikorn-k8shim](https://github.com/apache/incubator-yunikorn-k8shim) project.
+The build procedure will build all components into a single executable that can be deployed and run on Kubernetes.
+
+Start the integrated build process by pulling the `yunikorn-k8shim` repository:
+```bash
+mkdir $HOME/yunikorn/
+cd $HOME/yunikorn/
+git clone https://github.com/apache/incubator-yunikorn-k8shim.git
+```
+At this point you have an environment that will allow you to build an integrated image for the YuniKorn scheduler.
+
+### Build Docker image
+
+Building a docker image can be triggered with the following command.
+
+```
+make image
+```
+
+The image with the built-in configuration can be deployed directly on kubernetes.
+Some sample deployments that can be used are found under [deployments](https://github.com/apache/incubator-yunikorn-k8shim/tree/master/deployments/scheduler) directory.
+For the deployment that uses a config map you need to set up the ConfigMap in kubernetes.
+How to deploy the scheduler with a ConfigMap is explained in the [scheduler configuration deployment](developer_guide/deployment.md) document.
+
+The image build command will first build the integrated executable and then create the docker image.
+Currently, there are some published docker images under [this docker hub repo](https://hub.docker.com/r/apache/yunikorn) that you are free to fetch and use.
+But keep in mind, YuniKorn has no official release yet; the latest version images can only be used for testing or evaluation, do not use them in production.
+The default image tags are not suitable for deployments to an accessible repository as the build uses a hardcoded user and would push to Docker Hub with the proper credentials.
+You *must* update the `TAG` variable in the `Makefile` to push to an accessible repository.
+When you update the image tag be aware that the deployment examples given will also need to be updated to reflect the same change.
+
+### Inspect the docker image
+
+The docker image built in the previous step has some important build info embedded in the image's metadata. You can retrieve
+this info with the docker `inspect` command.
+
+```
+docker inspect apache/yunikorn:scheduler-latest
+```
+
+This info includes the git revisions (last commit SHA) for each component, to help you understand which version of the source code
+was shipped in this image. They are listed as docker image `labels`, such as:
+
+```
+"Labels": {
+    "BuildTimeStamp": "2019-07-16T23:08:06+0800",
+    "Version": "0.1",
+    "yunikorn-core-revision": "dca66c7e5a9e",
+    "yunikorn-k8shim-revision": "bed60f720b28",
+    "yunikorn-scheduler-interface-revision": "3df392eded1f"
+}
+```
+
+### Dependencies
+
+The dependencies in the projects are managed using [go modules](https://blog.golang.org/using-go-modules).
+Go Modules require at least Go version 1.11 to be installed on the development system.
+
+If you want to modify one of the projects locally and build with your local dependencies you will need to change the module file. 
+Changing dependencies uses go mod `replace` directives as explained in [Updating dependencies](#updating-dependencies).
+
+The YuniKorn project has four repositories; three of those repositories have a dependency at the go level.
+These dependencies are part of the go modules and point to the github repositories.
+During development it can be required to break the dependency on the committed version from github.
+This requires making changes in the module file to allow loading a local copy or a forked copy from a different repository.  
+
+#### Affected repositories
+The following dependencies exist between the repositories:
+
+| repository| depends on |
+| --- | --- |
+| yunikorn-core | yunikorn-scheduler-interface | 
+| yunikorn-k8shim | yunikorn-scheduler-interface, yunikorn-core |
+| yunikorn-scheduler-interface | none |
+| yunikorn-web | yunikorn-core |
+
+The `yunikorn-web` repository has no direct go dependency on the other repositories. However any change to the `yunikorn-core` webservices can affect the web interface. 
+
+#### Making local changes
+
+To make sure that the local changes will not break other parts of the build you should run:
+- A full build `make` (build target depends on the repository)
+- A full unit test run `make test`
+
+Any test failures should be fixed before proceeding.
+
+#### Updating dependencies
+
+The simplest way is to use the `replace` directive in the module file. The `replace` directive allows you to override the import path with a new (local) path.
+There is no need to change any of the imports in the source code. The change must be made in the `go.mod` file of the repository that has the dependency. 
+
+Using `replace` to use a forked dependency, for example:
+```
+replace github.com/apache/incubator-yunikorn-core => example.com/some/forked-yunikorn
+```
+
+There is no requirement to fork and create a new repository. If you do not have a repository you can use a local checked out copy too. 
+Using `replace` to use a local directory as a dependency:
+```
+replace github.com/apache/incubator-yunikorn-core => /User/example/local/checked-out-yunikorn
+```
+and for the same dependency using a relative path:
+```
+replace github.com/apache/incubator-yunikorn-core => ../checked-out-yunikorn
+```
+Note: if the `replace` directive is using a local filesystem path, then the target must have the `go.mod` file at that location.
+
+Further details on the modules wiki: [When should I use the 'replace' directive?](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive).
+
+## Build the web UI
+
+Example deployments reference the [YuniKorn web UI](https://github.com/apache/incubator-yunikorn-web). 
+The YuniKorn web UI has its own specific requirements for the build. Follow the steps in that project's README to prepare a development environment and to build the project.
+The scheduler is fully functional without the web UI. 
+
+## Locally run the integrated scheduler
+
+When you have a local development environment setup you can run the scheduler in your local kubernetes environment.
+This has been tested with the Kubernetes support in Docker Desktop as well as with Minikube. See the [environment setup guide](developer_guide/env_setup.md) for further details.
+
+```
+make run
+```
+It will connect to the kubernetes cluster using the user's configuration located in `$HOME/.kube/config`.
+
+You can also use the same approach to run the scheduler locally but connecting to a remote kubernetes cluster,
+as long as the `$HOME/.kube/config` file is pointing to that remote cluster.
+
+
diff --git a/versioned_docs/version-0.9.0/developer_guide/deployment.md b/versioned_docs/version-0.9.0/developer_guide/deployment.md
new file mode 100644
index 0000000..a4b4855
--- /dev/null
+++ b/versioned_docs/version-0.9.0/developer_guide/deployment.md
@@ -0,0 +1,122 @@
+---
+id: deployment
+title: Deploy to Kubernetes
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+The easiest way to deploy YuniKorn is to leverage our [helm charts](https://hub.helm.sh/charts/yunikorn/yunikorn);
+you can find the guide [here](get_started/get_started.md). This document describes the manual process to deploy the YuniKorn
+scheduler and is mainly intended for developers.
+
+## Build docker image
+
+Under the project root of `yunikorn-k8shim`, run the following command to build an image that uses the ConfigMap for its configuration:
+```
+make image
+```
+
+This command will build an image. The image will be tagged with a default version and image tag.
+
+**Note** the default build uses a hardcoded user and tag. You *must* update the `IMAGE_TAG` variable in the `Makefile` to push to an appropriate repository. 
+
+
+## Setup RBAC
+
+The first step is to create the RBAC role for the scheduler, see [yunikorn-rbac.yaml](https://github.com/apache/incubator-yunikorn-k8shim/blob/master/deployments/scheduler/yunikorn-rbac.yaml)
+```
+kubectl create -f scheduler/yunikorn-rbac.yaml
+```
+The role is a requirement on the current versions of kubernetes.
+
+## Create the ConfigMap
+
+This must be done before deploying the scheduler. It requires a correctly setup kubernetes environment.
+This kubernetes environment can be either local or remote. 
+
+- download configuration file if not available on the node to add to kubernetes:
+```
+curl -o queues.yaml https://raw.githubusercontent.com/apache/incubator-yunikorn-k8shim/master/conf/queues.yaml
+```
+- create ConfigMap in kubernetes:
+```
+kubectl create configmap yunikorn-configs --from-file=queues.yaml
+```
+- check if the ConfigMap was created correctly:
+```
+kubectl describe configmaps yunikorn-configs
+```
+
+**Note**: if the name of the ConfigMap is changed, the volume in the scheduler yaml file must be updated to reference the new name, otherwise the changes to the configuration will not be picked up. 
+
+## Attach ConfigMap to the Scheduler Pod
+
+The ConfigMap is attached to the scheduler as a special volume. First step is to specify where to mount it in the pod:
+```yaml
+  volumeMounts:
+    - name: config-volume
+      mountPath: /etc/yunikorn/
+```
+Second step is to link the mount point back to the configuration map created in kubernetes:
+```yaml
+  volumes:
+    - name: config-volume
+      configMap:
+        name: yunikorn-configs
+``` 
+
+Both steps are part of the scheduler yaml file; an example can be seen at [scheduler.yaml](https://github.com/apache/incubator-yunikorn-k8shim/blob/master/deployments/scheduler/scheduler.yaml)
+for reference.
+
+## Deploy the Scheduler
+
+The scheduler can be deployed with the following command.
+```
+kubectl create -f deployments/scheduler/scheduler.yaml
+```
+
+The deployment will run 2 containers from your pre-built docker images in 1 pod:
+
+* yunikorn-scheduler-core (yunikorn scheduler core and shim for K8s)
+* yunikorn-scheduler-web (web UI)
+
+The pod is deployed as a customized scheduler; it will take responsibility for scheduling pods that explicitly specify `schedulerName: yunikorn` in the pod's spec.
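+
+As an illustration, a minimal pod spec that opts in to YuniKorn scheduling could look like the sketch below; the pod name, container and image are placeholders, only the `schedulerName` field matters here:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sleep-app              # placeholder name
+spec:
+  schedulerName: yunikorn      # hand this pod to YuniKorn instead of the default scheduler
+  containers:
+    - name: sleep
+      image: alpine:latest     # placeholder image
+      command: ["sleep", "3600"]
+```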
+
+## Access to the web UI
+
+When the scheduler is deployed, the web UI is also deployed in a container.
+Port forwarding for the web interface on the standard ports can be turned on via:
+
+```
+POD=`kubectl get pod -l app=yunikorn -o jsonpath="{.items[0].metadata.name}"` && \
+kubectl port-forward ${POD} 9889 9080
+```
+
+`9889` is the default port for Web UI, `9080` is the default port of scheduler's Restful service where web UI retrieves info from.
+Once this is done, web UI will be available at: http://localhost:9889.
+
+## Configuration Hot Refresh
+
+YuniKorn supports loading configuration changes automatically from the attached configmap. Simply update the content in the configmap,
+which can be done either via the Kubernetes dashboard UI or the command line. _Note_, changes made to the configmap might take some
+time to be picked up by the scheduler.
+
+
+
diff --git a/docs/developer_guide/env_setup.md b/versioned_docs/version-0.9.0/developer_guide/env_setup.md
similarity index 89%
copy from docs/developer_guide/env_setup.md
copy to versioned_docs/version-0.9.0/developer_guide/env_setup.md
index 4d0f1b8..c45d77e 100644
--- a/docs/developer_guide/env_setup.md
+++ b/versioned_docs/version-0.9.0/developer_guide/env_setup.md
@@ -37,7 +37,7 @@ Just simply follow the instruction [here](https://docs.docker.com/docker-for-mac
 
 Once Kubernetes is started in docker desktop, you should see something similar below:
 
-![Kubernetes in Docker Desktop](/img/docker-desktop.png)
+![Kubernetes in Docker Desktop](./../assets/docker-desktop.png)
 
 This means that:
 1. Kubernetes is running.
@@ -68,9 +68,9 @@ The dashboard as deployed in the previous step requires a token or config to sig
     ```
 3. copy the token value which is part of the `Data` section with the tag `token`.
 4. select the **Token** option in the dashboard web UI:<br/>
-    ![Token Access in dashboard](/img/dashboard_token_select.png)
+    ![Token Access in dashboard](./../assets/dashboard_token_select.png)
 5. paste the token value into the input box and sign in:<br/>
-    ![Token Access in dashboard](/img/dashboard_secret.png)
+    ![Token Access in dashboard](./../assets/dashboard_secret.png)
 
 ## Local Kubernetes cluster with Minikube
 Minikube can be added to an existing Docker Desktop install. Minikube can either use the pre-installed hypervisor or use a hypervisor of choice. These instructions use [HyperKit](https://github.com/moby/hyperkit) which is embedded in Docker Desktop.   
@@ -78,28 +78,28 @@ Minikube can be added to an existing Docker Desktop install. Minikube can either
 If you want to use a different hypervisor then HyperKit make sure that you follow the generic minikube install instructions. Do not forget to install the correct driver for the chosen hypervisor if required.
 The basic instructions are provided in the [minikube install](https://kubernetes.io/docs/tasks/tools/install-minikube/) instructions.
 
-Check hypervisor
 Docker Desktop should have already installed HyperKit. In a terminal run: `hyperkit` to confirm. Any response other than `hyperkit: command not found` confirms that HyperKit is installed and on the path. If it is not found you can choose a different hypervisor or fix the Docker Desktop install.
+Check hypervisor Docker Desktop should have already installed HyperKit. In a terminal run: `hyperkit` to confirm. Any response other than `hyperkit: command not found` confirms that HyperKit is installed and on the path. If it is not found you can choose a different hypervisor or fix the Docker Desktop install.
 
 ### Installing Minikube
 1. install minikube, you can either use brew or directly via these steps: 
     ```shell script
-    
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64
-    chmod +x minikube

+    curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64
+    chmod +x minikube
     sudo mv minikube /usr/local/bin
     ```
-1. install HyperKit driver
 (required), you can either use brew or directly via these steps:
+1. install HyperKit driver (required), you can either use brew or directly via these steps:
     ```shell script
     curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-hyperkit
     sudo install -o root -g wheel -m 4755 docker-machine-driver-hyperkit /usr/local/bin/
     ```
-1. update the minikube config to default to the HyperKit install
 `minikube config set vm-driver hyperkit`
+1. update the minikube config to default to the HyperKit install `minikube config set vm-driver hyperkit`
 1. change docker desktop to use minikube for Kubernetes:<br/>
-    ![Kubernetes in Docker Desktop: minikube setting](/img/docker-dektop-minikube.png)
+    ![Kubernetes in Docker Desktop: minikube setting](./../assets/docker-dektop-minikube.png)
 
 ### Deploy and access the cluster
 After the installation is done you can start a new cluster.
 1. start the minikube cluster: `minikube start --kubernetes-version v1.14.2`
-1. start the minikube dashboard: `
minikube dashboard &`
+1. start the minikube dashboard: `minikube dashboard &`
 
 ### Build impact
 When you create images make sure that the build is run after pointing it to the right environment. 
@@ -116,7 +116,7 @@ Note, this instruction requires you have GoLand IDE for development.
 In GoLand, go to yunikorn-k8shim project. Then click "Run" -> "Debug..." -> "Edit Configuration..." to get the pop-up configuration window.
 Note, you need to click "+" to create a new profile if the `Go Build` option is not available at the first time.
 
-![Debug Configuration](/img/goland_debug.jpg)
+![Debug Configuration](./../assets/goland_debug.jpg)
 
 The highlighted fields are the configurations you need to add. These include:
 
diff --git a/versioned_docs/version-0.9.0/get_started/core_features.md b/versioned_docs/version-0.9.0/get_started/core_features.md
new file mode 100644
index 0000000..8f22589
--- /dev/null
+++ b/versioned_docs/version-0.9.0/get_started/core_features.md
@@ -0,0 +1,73 @@
+---
+id: core_features
+title: Features
+keywords:
+ - feature
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+The main features of YuniKorn include:
+
+## App-aware scheduling
+One of the key differences of YuniKorn is that it does app-aware scheduling. The default K8s scheduler simply schedules
+pod by pod, without any context about users, apps or queues. YuniKorn, however, recognizes users, apps and queues, and it considers
+a lot more factors, e.g. resources, ordering etc., while making scheduling decisions. This gives us the possibility of
+fine-grained control over resource quotas, resource fairness and priorities, which are the most important requirements
+for a multi-tenancy computing system.
+
+## Hierarchy Resource Queues
+
+Hierarchical queues provide an efficient mechanism to manage cluster resources. The hierarchy of the queues can logically
+map to the structure of an organization. This gives fine-grained control over resources for different tenants. The YuniKorn
+UI provides a centralised view to monitor the usage of resource queues; it helps you get insight into how resources are
+used across different tenants. What's more, by leveraging the min/max queue capacity, it can define how elastic the
+resource consumption for each tenant can be.
+
+## Job Ordering and Queuing
+Applications can be properly queued in working queues; the ordering policy determines which application gets resources first.
+The policy can vary, such as simple `FIFO`, `Fair`, `StateAware` or `Priority` based. Queues can maintain the order of applications,
+and based on different policies, the scheduler allocates resources to jobs accordingly. The behavior is much more predictable.
+
+What's more, when the queue max-capacity is configured, jobs and tasks can be properly queued up in the resource queue.
+If the remaining capacity is not enough, they wait in line until some resources are released. This simplifies
+the client side operation. With the default scheduler, by contrast, resources are capped by namespace resource quotas,
+enforced by the quota-admission-controller: if the underlying namespace does not have enough quota, pods cannot be
+created, and the client side needs complex logic, e.g. retry by condition, to handle such scenarios.
+
+## Resource fairness
+In a multi-tenant environment, a lot of users share cluster resources. To prevent tenants from competing for resources
+and potentially getting starved, more fine-grained fairness is needed across users, as well as teams/organizations.
+With consideration of weights or priorities, some more important applications can get resources in high demand beyond their share.
+This is often associated with a resource budget, where a more fine-grained fairness mode can further improve expense control.
+
+## Resource Reservation
+
+YuniKorn automatically does reservations for outstanding requests. If a pod could not be allocated, YuniKorn will try to
+reserve it on a qualified node and tentatively allocate the pod on this reserved node (before trying the rest of the nodes).
+This mechanism avoids the pod getting starved by smaller, less picky pods submitted later.
+This feature is important in batch workload scenarios because when a large number of heterogeneous pods is submitted
+to the cluster, it is very likely that some pods would be starved even though they were submitted much earlier. 
+
+## Throughput
+Throughput is a key criterion for measuring scheduler performance. It is critical for a large scale distributed system.
+If throughput is bad, applications may waste time waiting for scheduling, which further impacts service SLAs.
+When the cluster gets bigger, it also raises the requirement for higher throughput. The [performance evaluation based on Kubemark](performance/evaluate_perf_function_with_kubemark.md)
+reveals some performance numbers.
diff --git a/docs/get_started/get_started.md b/versioned_docs/version-0.9.0/get_started/get_started.md
similarity index 92%
copy from docs/get_started/get_started.md
copy to versioned_docs/version-0.9.0/get_started/get_started.md
index cfb72a5..ba465de 100644
--- a/docs/get_started/get_started.md
+++ b/versioned_docs/version-0.9.0/get_started/get_started.md
@@ -24,7 +24,7 @@ under the License.
 
 Before reading this guide, we assume you either have a Kubernetes cluster, or a local Kubernetes dev environment, e.g MiniKube.
 It is also assumed that `kubectl` is on your path and properly configured.
-Follow this [guide](developer_guide/env_setup.md) on how to setup a local Kubernetes cluster using docker-desktop.
+Follow this [guide](../developer_guide/env_setup.md) on how to setup a local Kubernetes cluster using docker-desktop.
 
 ## Install
 
@@ -43,7 +43,7 @@ When `admission-controller` is installed, it simply routes all traffic to YuniKo
 is delegated to YuniKorn. You can disable it by setting `embedAdmissionController` flag to false during the helm install.  
 
 If you don't want to use helm charts, you can find our step-by-step
-tutorial [here](developer_guide/deployment.md).
+tutorial [here](../developer_guide/deployment.md).
 
 ## Uninstall
 
@@ -65,7 +65,7 @@ kubectl port-forward svc/yunikorn-service 9080:9080 -n yunikorn
 `9889` is the default port for Web UI, `9080` is the default port of scheduler's Restful service where web UI retrieves info from.
 Once this is done, web UI will be available at: http://localhost:9889.
 
-![UI Screenshots](/img/yk-ui-screenshots.gif)
+![UI Screenshots](./../assets/yk-ui-screenshots.gif)
 
 YuniKorn UI provides a centralised view for cluster resource capacity, utilization, and all application info.
 
diff --git a/docs/performance/evaluate_perf_function_with_kubemark.md b/versioned_docs/version-0.9.0/performance/evaluate_perf_function_with_kubemark.md
similarity index 96%
copy from docs/performance/evaluate_perf_function_with_kubemark.md
copy to versioned_docs/version-0.9.0/performance/evaluate_perf_function_with_kubemark.md
index d3e7c6f..066596a 100644
--- a/docs/performance/evaluate_perf_function_with_kubemark.md
+++ b/versioned_docs/version-0.9.0/performance/evaluate_perf_function_with_kubemark.md
@@ -36,7 +36,7 @@ In YuniKorn, we have done lots of optimizations to improve the performance, such
 and low-latency sorting policies. The following chart reveals the scheduler throughput (by using Kubemark simulated
 environment, and submitting 50,000 pods), comparing to the K8s default scheduler.
 
-![Scheduler Throughput](/img/throughput.png)
+![Scheduler Throughput](./../assets/throughput.png)
 
 The charts record the time spent until all pods are running on the cluster
 
@@ -52,7 +52,7 @@ Each of YuniKorn queues has its guaranteed and maximum capacity. When we have lo
 YuniKorn ensures each of them gets its fair share. When we monitor the resource usage of these queues, we can clearly
 see how fairness was enforced:
 
-![Scheduler Throughput](/img/queue-fairness.png)
+![Scheduler Throughput](./../assets/queue-fairness.png)
 
 We set up 4 heterogeneous queues on this cluster, and submit different workloads against these queues.
 From the chart, we can see the queue resources are increasing nearly in the same trend, which means the resource
@@ -83,7 +83,7 @@ This means at the given time, this cluster has 100 nodes whose utilization is in
 it has 300 nodes whose utilization is in the range 10% - 20%, and so on… Now, we run lots of workloads and
 collect metrics, see the below chart:
 
-<img src="/img/node-fair.png" />
+<img src="./../assets/node-fair.png" />
 
 We can see all nodes have 0% utilization, and then all of them move to bucket-1, then bucket-2 … and eventually
 all nodes moved to bucket-9, which means all capacity is used. In another word, nodes’ resource has been used in
@@ -93,7 +93,7 @@ a fairness manner.
 
 This is When the bin-packing policy is enabled, we can see the following pattern:
 
-<img src="/img/node-bin-packing.png" />
+<img src="./../assets/node-bin-packing.png" />
 
 On the contrary, all nodes are moving between 2 buckets, bucket-0 and bucket-9. Nodes in bucket-0 (0% - 10%)
 are decreasing in a linear manner, and nodes in bucket-9 (90% - 100%) are increasing with the same curve.
diff --git a/versioned_docs/version-0.9.0/performance/metrics.md b/versioned_docs/version-0.9.0/performance/metrics.md
new file mode 100644
index 0000000..d7ebfa4
--- /dev/null
+++ b/versioned_docs/version-0.9.0/performance/metrics.md
@@ -0,0 +1,72 @@
+---
+id: metrics
+title: Scheduler Metrics
+keywords:
+ - metrics
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+YuniKorn leverages [Prometheus](https://prometheus.io/) to record metrics. The metrics system keeps track of the
+scheduler's critical execution paths, to reveal potential performance bottlenecks. Currently, there are two categories
+of metrics:
+
+- scheduler: generic metrics of the scheduler, such as allocation latency, num of apps etc.
+- queue: each queue has its own metrics sub-system, tracking queue status.
+
+All metrics are declared in the `yunikorn` namespace.
+
+## Access Metrics
+
+YuniKorn metrics are collected through the Prometheus client library and exposed via the scheduler's RESTful service.
+Once started, they can be accessed via the endpoint http://localhost:9080/ws/v1/metrics.
+
+## Aggregate Metrics to Prometheus
+
+It's simple to set up a Prometheus server to scrape YuniKorn metrics periodically. Follow these steps:
+
+- Setup Prometheus (read more from [Prometheus docs](https://prometheus.io/docs/prometheus/latest/installation/))
+
+- Configure Prometheus rules: a sample configuration 
+
+```yaml
+global:
+  scrape_interval:     3s
+  evaluation_interval: 15s
+
+scrape_configs:
+  - job_name: 'yunikorn'
+    scrape_interval: 1s
+    metrics_path: '/ws/v1/metrics'
+    static_configs:
+    - targets: ['docker.for.mac.host.internal:9080']
+```
+
+- start Prometheus
+
+```shell script
+docker pull prom/prometheus:latest
+docker run -p 9090:9090 -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus
+```
+
+Use `docker.for.mac.host.internal` instead of `localhost` if you are running Prometheus in a local docker container
+on Mac OS. Once started, open Prometheus web UI: http://localhost:9090/graph. You'll see all available metrics from
+YuniKorn scheduler.
+
diff --git a/docs/performance/profiling.md b/versioned_docs/version-0.9.0/performance/profiling.md
similarity index 99%
copy from docs/performance/profiling.md
copy to versioned_docs/version-0.9.0/performance/profiling.md
index dbbff84..4050ccd 100644
--- a/docs/performance/profiling.md
+++ b/versioned_docs/version-0.9.0/performance/profiling.md
@@ -60,7 +60,7 @@ you can type command such as `web` or `gif` to get a graph that helps you better
 understand the overall performance on critical code paths. You can get something
 like below:
 
-![CPU Profiling](/img/cpu_profile.jpg)
+![CPU Profiling](./../assets/cpu_profile.jpg)
 
 Note, in order to use these
 options, you need to install the virtualization tool `graphviz` first, if you are using Mac, simply run `brew install graphviz`, for more info please refer [here](https://graphviz.gitlab.io/).
diff --git a/docs/user_guide/acls.md b/versioned_docs/version-0.9.0/user_guide/acls.md
similarity index 96%
copy from docs/user_guide/acls.md
copy to versioned_docs/version-0.9.0/user_guide/acls.md
index 19887a8..8f41a80 100644
--- a/docs/user_guide/acls.md
+++ b/versioned_docs/version-0.9.0/user_guide/acls.md
@@ -23,7 +23,8 @@ under the License.
 -->
 
 :::caution
-Warning! This feature has not been fully implemented. Please use the wildcard ACL for now. 
+User information is currently not passed to the core scheduler from the kubernetes shim.
+Therefore, the recommendation is to use the wildcard ACL on the root queue for now as per the default configuration.
 :::
 
 ## Usage
diff --git a/docs/user_guide/placement_rules.md b/versioned_docs/version-0.9.0/user_guide/placement_rules.md
similarity index 99%
copy from docs/user_guide/placement_rules.md
copy to versioned_docs/version-0.9.0/user_guide/placement_rules.md
index e2ed44c..5f2c64d 100644
--- a/docs/user_guide/placement_rules.md
+++ b/versioned_docs/version-0.9.0/user_guide/placement_rules.md
@@ -22,7 +22,7 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-The basics for the placement rules are described in the [scheduler design document](design/scheduler_configuration.md#placement-rules-definition).
+The basics for the placement rules are described in the [scheduler configuration design document](design/scheduler_configuration.md#placement-rules-definition).
 Multiple rules can be chained to form a placement policy.
 [Access control lists](user_guide/acls.md) and rule filters are defined per rule and enforced per rule.
 This document explains how to build a policy, including the rule usage, that is part of the scheduler with examples.
diff --git a/docs/user_guide/queue_config.md b/versioned_docs/version-0.9.0/user_guide/queue_config.md
similarity index 99%
copy from docs/user_guide/queue_config.md
copy to versioned_docs/version-0.9.0/user_guide/queue_config.md
index 007d5a0..fb3e4b8 100644
--- a/docs/user_guide/queue_config.md
+++ b/versioned_docs/version-0.9.0/user_guide/queue_config.md
@@ -22,7 +22,7 @@ specific language governing permissions and limitations
 under the License.
 -->
 
-The basis for the queue configuration is given in the [scheduler design document](design/architecture.md#configurations--semantics).
+The basis for the queue configuration is given in the [configuration design document](design/scheduler_configuration.md).
 
 This document provides the generic queue configuration.
 It references both the [Access control lists](user_guide/acls.md) and [placement rule](user_guide/placement_rules.md) documentation.
diff --git a/versioned_docs/version-0.9.0/user_guide/resource_quota_mgmt.md b/versioned_docs/version-0.9.0/user_guide/resource_quota_mgmt.md
new file mode 100644
index 0000000..e1e2e54
--- /dev/null
+++ b/versioned_docs/version-0.9.0/user_guide/resource_quota_mgmt.md
@@ -0,0 +1,152 @@
+---
+id: resource_quota_management
+title: Resource Quota Management
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+YuniKorn can offer more fine-grained resource quota management compared to simply
+using namespace resource quotas. Here are some how-to documents about setting up
+resource quota management with YuniKorn queues.
+
+## Option 1) Static queues
+
+### Goal
+
+Pre-set up a hierarchy of queues with min/max capacities; users can only submit
+jobs to the leaf queues. This approach fully manages the resource capacity for
+each of the queues, which is suitable for scenarios where queues do not change
+too often.
+
+### Configuration
+
+:::note
+The following configuration is an example to demonstrate the format;
+you need to set up the queue hierarchy based on your own structure and capacity.
+:::
+
+Apply the following configuration to YuniKorn's configmap:
+
+```yaml
+partitions:
+  -
+    name: default
+    queues:
+      -
+        name: root
+        submitacl: '*'
+        queues:
+          -
+            name: advertisement
+            resources:
+              guaranteed:
+                memory: 500000
+                vcore: 50000
+              max:
+                memory: 800000
+                vcore: 80000
+          -
+            name: search
+            resources:
+              guaranteed:
+                memory: 400000
+                vcore: 40000
+              max:
+                memory: 600000
+                vcore: 60000
+          -
+            name: sandbox
+            resources:
+              guaranteed:
+                memory: 100000
+                vcore: 10000
+              max:
+                memory: 100000
+                vcore: 10000
+```
+
+In this example, we set up 3 queues under root, and each of them has
+a specific min/max capacity configured.
+
+### Run workloads
+
+In order to run jobs in specific queues, you will need to set the following labels in all pods' specs:
+
+```yaml
+labels:
+  app: my-test-app
+  applicationId: "my-test-app-01"
+  queue: root.sandbox
+```
+
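+For illustration only, a complete pod manifest carrying these labels might look like the following sketch; the pod name, container and image are placeholders, and `schedulerName: yunikorn` is included so that the pod is handled by the YuniKorn scheduler:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: my-test-pod            # placeholder name
+  labels:
+    app: my-test-app
+    applicationId: "my-test-app-01"
+    queue: root.sandbox        # must be one of the leaf queues defined above
+spec:
+  schedulerName: yunikorn      # let YuniKorn schedule this pod
+  containers:
+    - name: main
+      image: alpine:latest     # placeholder image
+      command: ["sleep", "3600"]
+```
+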
+## Option 2) 1:1 mapping from namespaces to queues
+
+### Goal
+
+The user just needs to set up namespaces; YuniKorn automatically maps each namespace to an internal resource queue (a.k.a. a dynamic queue).
+There are no additional steps to create YuniKorn queues; all queues will be created dynamically, and
+resource allocation and quotas will be managed by YuniKorn instead of the namespace resource quota.
+
+### Configuration
+
+Apply the following configuration to YuniKorn's configmap:
+
+```yaml
+partitions:
+  -
+    name: default
+    placementrules:
+      - name: tag
+        value: namespace
+        create: true
+    queues:
+      - name: root
+        submitacl: '*'
+        properties:
+          application.sort.policy: stateaware
+
+```
+
+Note that the property `application.sort.policy` in this configuration is set to
+`stateaware`. This is a simple app sorting policy applicable to batch jobs; you
+can find more documentation [here](sorting_policies.md#StateAwarePolicy).
+
+You can do this during the installation by overwriting the configuration in the
+[helm chart template](https://github.com/apache/incubator-yunikorn-release/blob/724ec82d0d548598e170cc6d5ca6aaae00f8286c/helm-charts/yunikorn/values.yaml#L71-L81).
+
+### Set up namespaces
+
+Continue to create namespaces like before, but do not create namespace quotas anymore.
+Instead, set the following annotations on the namespace object:
+
+```yaml
+yunikorn.apache.org/namespace.max.cpu: "64"
+yunikorn.apache.org/namespace.max.memory: "100Gi"
+```
+
+YuniKorn will parse these annotations and set the max capacity of the dynamic queue
+that is mapped to this namespace to 64 CPU and 100GB of memory.
+
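+For example, a namespace manifest carrying these annotations could look like the following sketch; the namespace name `development` is only an example, matching the placement rule illustration below:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: development
+  annotations:
+    # YuniKorn reads these to cap the dynamic queue mapped to this namespace
+    yunikorn.apache.org/namespace.max.cpu: "64"
+    yunikorn.apache.org/namespace.max.memory: "100Gi"
+```
+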
+### Run workloads
+
+Jobs continue to be submitted to namespaces; based on the placement rule used
+in the configuration, YuniKorn will automatically run the job and all its pods in
+the corresponding queue. For example, if a job is submitted to the namespace `development`,
+then you will see the job running in the `root.development` queue.
diff --git a/versioned_docs/version-0.9.0/user_guide/sorting_policies.md b/versioned_docs/version-0.9.0/user_guide/sorting_policies.md
new file mode 100644
index 0000000..f969cc0
--- /dev/null
+++ b/versioned_docs/version-0.9.0/user_guide/sorting_policies.md
@@ -0,0 +1,154 @@
+---
+id: sorting_policies
+title: Sorting Policies
+---
+
+<!--
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+
+The scheduler uses policies that allow changing the scheduling behaviour without code changes.
+Policies can be set for:
+* [Applications](#application-sorting)
+* [Nodes](#node-sorting)
+* [Requests](#request-sorting)
+
+## Application sorting
+The application sorting policy is set for each queue via the config.
+A sorting policy setting is only effective on a `leaf` queue.
+Each `leaf` queue can use a different policy.
+
+A sorting policy only specifies the order in which the applications are sorted within a queue.
+That order is crucial in specifying which application is considered first when assigning resources.
+Sorting policies do _not_ affect the number of applications that are scheduled or active in the queue at the same time.
+All applications that have pending resource requests can and will be scheduled in a queue unless specifically filtered out.
+Even when applications are sorted using a first in first out policy multiple applications will run in a queue in parallel. 
+
+A `parent` queue will always use the fair policy to sort the child queues.
+
+The following configuration entry sets the application sorting policy to `fifo` for the queue `root.sandbox`: 
+```yaml
+partitions:
+  - name: default
+    queues:
+    - name: root
+      queues:
+      - name: sandbox
+        properties:
+          application.sort.policy: fifo
+```
+
+Only applications that have outstanding requests are considered during scheduling.
+A filter is applied _while_ sorting the applications to remove all that do not have outstanding requests.
+
+### FifoSortPolicy
+Short description: first in first out, based on application create time  
+Config value: fifo (default)  
+Behaviour:  
+Before sorting the applications are filtered and must have pending resource requests.
+
+After filtering, the applications left are sorted based on the application create time stamp only; no other filtering is applied. 
+Since applications can only be added while the system is locked, there can never be two applications with the exact same time stamp. 
+
+The result is that the oldest application that requests resources gets resources.
+Younger applications will be given resources when all the current requests of older applications have been fulfilled. 
+
+### FairSortPolicy
+Short description: fair based on usage  
+Config value: fair  
+Behaviour:  
+Before sorting the applications are filtered and must have pending resource requests.
+
+After filtering, the applications left are sorted based on the application usage.
+The usage of the application is defined as all confirmed and unconfirmed allocations for the applications. 
+All resources defined on the application will be taken into account when calculating the usage.
+
+The result is that the resources available are spread equally over all applications that request resources.
+
+### StateAwarePolicy
+Short description: limit of one (1) application in Starting or Accepted state  
+Config value: stateaware  
+Behaviour:  
+This sorting policy requires an understanding of the application states.
+Applications states are described in the [application states](design/scheduler_object_states.md#application-state) documentation.
+
+Before sorting applications the following filters are applied to all applications in the queue:
+The first filter is based on the application state.
+The following applications pass through the filter and generate the first intermediate list:
+* all applications in the state _running_
+* _one_ (1) application in the _starting_ state
+* if there are _no_ applications in the _starting_ state _one_ (1) application in the _accepted_ state is added
+
+The second filter takes the result of the first filter as an input.
+The preliminary list is filtered again: all applications _without_ a pending request are removed.
+
+After filtering based on status and pending requests the applications that remain are sorted.
+The final list is thus filtered twice with the remaining applications sorted on create time.
+
+To recap the _starting_ and _accepted_ state interactions: 
+The application in the _accepted_ state is only added if there is no application in the _starting_ state.
+The application in the _starting_ state does not have to have pending requests.
+Any application in the _starting_ state will prevent _accepted_ applications from being added to the filtered list.
+
+For further details see the [Example run](design/state_aware_scheduling.md#example-run) in the design document.
+
+The result is that already running applications that request resources will get resources first.
+A drip feed of one new application at a time is added to the list of running applications, to be allocated after all running applications.  
+
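+As a sketch, enabling this policy uses the same `application.sort.policy` property shown in the `fifo` example above; the queue name `sandbox` is just an example:
+
+```yaml
+partitions:
+  - name: default
+    queues:
+    - name: root
+      queues:
+      - name: sandbox
+        properties:
+          application.sort.policy: stateaware
+```
+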
+## Node sorting
+The node sorting policy is set for a partition via the config.
+Each partition can use a different policy.
+
+The following configuration entry sets the node sorting policy to `fair` for the partition `default`: 
+```yaml
+partitions:
+  - name: default
+    nodesortpolicy:
+        type: fair
+```
+
+### FairnessPolicy
+Short description: available resource, descending order  
+Config value: fair (default)  
+Behaviour:  
+Sort the list of nodes by the amount of available resources so that the node with the _highest_ amount of available resource is the first in the list.
+All resources defined on a node will be taken into account when calculating the usage.
+Resources of the same type are compared for the nodes. 
+
+This results in a node with the lowest utilisation to be considered first for assigning new allocation.
+Resulting in a spread of allocations over all available nodes.
+Leading to an overall lower utilisation of the individual available nodes, unless the whole environment is highly utilised.
+Keeping the load on all nodes at a similar level does help. 
+In an environment that auto scales by adding new nodes this could trigger unexpected auto scale requests.   
+
+### BinPackingPolicy
+Short description: available resource, ascending order  
+Config value: binpacking  
+Behaviour:  
+Sort the list of nodes by the amount of available resources so that the node with the _lowest_ amount of available resource is the first in the list.
+All resources defined on a node will be taken into account when calculating the usage. 
+Resources of the same type are compared for the nodes. 
+
+This results in a node with the highest utilisation to be considered first for assigning new allocation.
+Resulting in a high(er) utilisation of a small(er) number of nodes, better suited for cloud deployments.   
+
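+For completeness, a partition configured to use bin-packing would look like the following sketch, mirroring the `fair` example above:
+
+```yaml
+partitions:
+  - name: default
+    nodesortpolicy:
+        type: binpacking
+```
+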
+## Request sorting
+There is currently one policy for sorting requests within an application.
+This policy is not configurable.
+Sorting requests is only possible based on the priority of the request.
+If there are multiple requests within an application that have the same priority the order of the requests is undetermined.
+This means that the order of requests with the same priority can, and most likely will, change between runs.
\ No newline at end of file
diff --git a/versioned_docs/version-0.9.0/user_guide/trouble_shooting.md b/versioned_docs/version-0.9.0/user_guide/trouble_shooting.md
new file mode 100644
index 0000000..1e3876e
--- /dev/null
+++ b/versioned_docs/version-0.9.0/user_guide/trouble_shooting.md
@@ -0,0 +1,153 @@
+---
+id: trouble_shooting
+title: Trouble Shooting
+---
+
+<!--
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+ 
+## Scheduler logs
+
+### Retrieve scheduler logs
+
+Currently, the scheduler writes its logs to stdout/stderr, and the docker container runtime redirects these logs to a
+local location on the node the pod runs on; see the Docker [logging documentation](https://docs.docker.com/config/containers/logging/configure/) for more detail.
+These logs can be retrieved with [kubectl logs](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#logs). For example:
+
+```shell script
+# get the scheduler pod
+kubectl get pod -l component=yunikorn-scheduler -n yunikorn
+NAME                                  READY   STATUS    RESTARTS   AGE
+yunikorn-scheduler-766d7d6cdd-44b82   2/2     Running   0          33h
+
+# retrieve logs
+kubectl logs yunikorn-scheduler-766d7d6cdd-44b82 yunikorn-scheduler-k8s -n yunikorn
+```
+
+In most cases, this command cannot retrieve all logs because the scheduler rolls its logs very quickly. To retrieve older
+logs, you will need to set up [cluster level logging](https://kubernetes.io/docs/concepts/cluster-administration/logging/#cluster-level-logging-architectures).
+The recommended setup is to leverage [fluentd](https://www.fluentd.org/) to collect and persist logs on external storage, e.g. S3.
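+
+Before such a logging stack is in place, the standard kubectl options can already help a little. The commands below are
+a sketch and assume the same pod and container names as in the example above:
+
+```shell script
+# follow the log stream of the running scheduler
+kubectl logs -f yunikorn-scheduler-766d7d6cdd-44b82 yunikorn-scheduler-k8s -n yunikorn
+
+# retrieve the logs of the previous container instance, e.g. after a restart
+kubectl logs --previous yunikorn-scheduler-766d7d6cdd-44b82 yunikorn-scheduler-k8s -n yunikorn
+```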
+
+### Set Logging Level
+
+:::note
+Changing the logging level requires a restart of the scheduler pod.
+:::
+
+Stop the scheduler:
+
+```shell script
+kubectl scale deployment yunikorn-scheduler -n yunikorn --replicas=0
+```
+Edit the deployment config (this opens the deployment in your default editor):
+
+```shell script
+kubectl edit deployment yunikorn-scheduler -n yunikorn
+```
+
+Add `LOG_LEVEL` to the `env` field of the container template. For example, setting `LOG_LEVEL` to `0` sets the logging
+level to `INFO`.
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ ...
+spec:
+  template: 
+   ...
+    spec:
+      containers:
+      - env:
+        - name: LOG_LEVEL
+          value: '0'
+```
+
+Start the scheduler:
+
+```shell script
+kubectl scale deployment yunikorn-scheduler -n yunikorn --replicas=1
+```
+
+Available logging levels:
+
+| Value 	| Logging Level 	|
+|:-----:	|:-------------:	|
+|   -1  	|     DEBUG     	|
+|   0   	|      INFO     	|
+|   1   	|      WARN     	|
+|   2   	|     ERROR     	|
+|   3   	|     DPanic    	|
+|   4   	|     Panic     	|
+|   5   	|     Fatal     	|
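+
+As an alternative to editing the deployment by hand, the same change can be made with `kubectl set env`. The command
+below is a sketch that assumes the default deployment name used above; changing the pod template triggers a rolling
+restart of the scheduler pod:
+
+```shell script
+# set the logging level to DEBUG (-1)
+kubectl set env deployment/yunikorn-scheduler LOG_LEVEL=-1 -n yunikorn
+```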
+
+## Pods are stuck at Pending state
+
+If some pods are stuck in the Pending state, it means the scheduler could not find a node to allocate them to. There are
+several possible causes:
+
+### 1. None of the nodes satisfy the pod placement requirements
+
+A pod can be configured with placement constraints, such as a [node-selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector),
+[affinity/anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) rules,
+or missing tolerations for node [taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+To debug such issues, describe the pod:
+
+```shell script
+kubectl describe pod <pod-name> -n <namespace>
+```
+
+The pod events will contain the predicate failures, which explain why the nodes did not qualify for the allocation.
+
+### 2. The queue is running out of capacity
+
+If the queue is running out of capacity, pods will stay pending until queue resources become available. There are
+several ways to check whether a queue still has enough capacity for the pending pods:
+
+1) Check the queue usage from the YuniKorn UI
+
+If you do not know how to access the UI, refer to the document [here](../get_started/get_started.md#access-the-web-ui). Go
+to the `Queues` page and navigate to the queue the job was submitted to. There you will see the remaining capacity
+of the queue.
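+
+If the UI service is not exposed outside of the cluster, a port-forward is a quick way to reach it. The command below
+is a sketch and assumes the service name and port used by the standard deployment:
+
+```shell script
+kubectl port-forward svc/yunikorn-service 9889:9889 -n yunikorn
+```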
+
+2) Check the pod events
+
+Run `kubectl describe pod` to get the pod events. An event like
+`Application <appID> does not fit into <queuePath> queue` means the pod could not be allocated because the queue
+is running out of capacity.
+
+The pod will be allocated once other pods in this queue complete or are removed. If the pod remains pending even though
+the queue has capacity, that may be because it is waiting for the cluster to scale up.
+
+## Restart the scheduler
+
+YuniKorn can recover its state upon a restart. The YuniKorn scheduler pod is deployed as a Kubernetes deployment, so the
+scheduler can be restarted by scaling the replicas down and back up:
+
+```shell script
+kubectl scale deployment yunikorn-scheduler -n yunikorn --replicas=0
+kubectl scale deployment yunikorn-scheduler -n yunikorn --replicas=1
+```
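+
+On clusters with kubectl 1.15 or later, the scale down and up can also be replaced by a single rollout command. Note
+that a rolling restart may briefly run the old and new scheduler pods side by side, while the scale down/up sequence
+guarantees only one instance at a time:
+
+```shell script
+kubectl rollout restart deployment yunikorn-scheduler -n yunikorn
+```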
+
+## Still got questions?
+
+No problem! The Apache YuniKorn community will be happy to help. You can reach out to the community with the following options:
+
+1. Post your questions to dev@yunikorn.apache.org
+2. Join the [YuniKorn slack channel](https://join.slack.com/t/yunikornworkspace/shared_invite/enQtNzAzMjY0OTI4MjYzLTBmMDdkYTAwNDMwNTE3NWVjZWE1OTczMWE4NDI2Yzg3MmEyZjUyYTZlMDE5M2U4ZjZhNmYyNGFmYjY4ZGYyMGE) and post your questions to the `#yunikorn-user` channel.
+3. Join the [community sync up meetings](http://yunikorn.apache.org/community/getInvolved#community-meetings) and directly talk to the community members. 
\ No newline at end of file
diff --git a/versioned_docs/version-0.9.0/user_guide/workloads/run_flink.md b/versioned_docs/version-0.9.0/user_guide/workloads/run_flink.md
new file mode 100644
index 0000000..d20e815
--- /dev/null
+++ b/versioned_docs/version-0.9.0/user_guide/workloads/run_flink.md
@@ -0,0 +1,66 @@
+---
+id: run_flink
+title: Run Flink Jobs
+description: How to run Flink jobs with YuniKorn
+image: https://svn.apache.org/repos/asf/flink/site/img/logo/png/100/flink_squirrel_100_color.png
+keywords:
+ - flink
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+It's very easy to run [Apache Flink](https://flink.apache.org/) on Kubernetes with YuniKorn. Depending on which mode is
+used to run Flink on Kubernetes, the configuration is slightly different.
+
+## Standalone mode
+
+Please follow [Kubernetes Setup](https://ci.apache.org/projects/flink/flink-docs-stable/ops/deployment/kubernetes.html) for details and examples of the standalone deploy mode.
+In this mode, we can directly add the required labels (applicationId and queue) to the Deployment/Job spec to run a Flink application with the YuniKorn scheduler, as described in [Run workloads with YuniKorn Scheduler](#run-workloads-with-yunikorn-scheduler).
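+
+The snippet below is a minimal sketch of a standalone JobManager Deployment with those labels added; the names, image
+and replica count are illustrative placeholders, and `schedulerName` is only needed when the YuniKorn admission
+controller is not installed:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+        applicationId: "MyOwnApplicationId"
+        queue: "root.sandbox"
+    spec:
+      schedulerName: yunikorn
+      containers:
+        - name: jobmanager
+          image: flink:1.11
+          args: ["jobmanager"]
+```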
+
+## Native mode
+
+Please follow [Native Kubernetes Setup](https://ci.apache.org/projects/flink/flink-docs-stable/ops/deployment/native_kubernetes.html) for details and examples of the native deploy mode.
+Running a Flink application with the YuniKorn scheduler in native mode is only supported for Flink 1.11 or above. We can leverage the two Flink configuration options `kubernetes.jobmanager.labels` and `kubernetes.taskmanager.labels` to set the required labels.
+Examples:
+
+* Start a Flink session
+```shell script
+./bin/kubernetes-session.sh \
+  -Dkubernetes.cluster-id=<ClusterId> \
+  -Dtaskmanager.memory.process.size=4096m \
+  -Dkubernetes.taskmanager.cpu=2 \
+  -Dtaskmanager.numberOfTaskSlots=4 \
+  -Dresourcemanager.taskmanager-timeout=3600000 \
+  -Dkubernetes.jobmanager.labels=applicationId:MyOwnApplicationId,queue:root.sandbox \
+  -Dkubernetes.taskmanager.labels=applicationId:MyOwnApplicationId,queue:root.sandbox
+```
+
+* Start a Flink application
+```shell script
+./bin/flink run-application -p 8 -t kubernetes-application \
+  -Dkubernetes.cluster-id=<ClusterId> \
+  -Dtaskmanager.memory.process.size=4096m \
+  -Dkubernetes.taskmanager.cpu=2 \
+  -Dtaskmanager.numberOfTaskSlots=4 \
+  -Dkubernetes.container.image=<CustomImageName> \
+  -Dkubernetes.jobmanager.labels=applicationId:MyOwnApplicationId,queue:root.sandbox \
+  -Dkubernetes.taskmanager.labels=applicationId:MyOwnApplicationId,queue:root.sandbox \
+  local:///opt/flink/usrlib/my-flink-job.jar
+```
\ No newline at end of file
diff --git a/docs/user_guide/workloads/run_spark.md b/versioned_docs/version-0.9.0/user_guide/workloads/run_spark.md
similarity index 97%
copy from docs/user_guide/workloads/run_spark.md
copy to versioned_docs/version-0.9.0/user_guide/workloads/run_spark.md
index 0b1655a..cee95df 100644
--- a/docs/user_guide/workloads/run_spark.md
+++ b/versioned_docs/version-0.9.0/user_guide/workloads/run_spark.md
@@ -116,12 +116,12 @@ ${SPARK_HOME}/bin/spark-submit --master k8s://http://localhost:8001 --deploy-mod
 
 You'll see Spark driver and executors been created on Kubernetes:
 
-![spark-pods](/img/spark-pods.png)
+![spark-pods](./../../assets/spark-pods.png)
 
 You can also view the job info from YuniKorn UI. If you do not know how to access the YuniKorn UI, please read the document
 [here](../../get_started/get_started.md#access-the-web-ui).
 
-![spark-jobs-on-ui](/img/spark-jobs-on-ui.png)
+![spark-jobs-on-ui](./../../assets/spark-jobs-on-ui.png)
 
 ## What happens behind the scenes?
 
diff --git a/versioned_docs/version-0.9.0/user_guide/workloads/run_tensorflow.md b/versioned_docs/version-0.9.0/user_guide/workloads/run_tensorflow.md
new file mode 100644
index 0000000..393e330
--- /dev/null
+++ b/versioned_docs/version-0.9.0/user_guide/workloads/run_tensorflow.md
@@ -0,0 +1,40 @@
+---
+id: run_tf
+title: Run Tensorflow Jobs
+keywords:
+ - tensorflow
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+Here is an example of a Tensorflow job. You must install the tf-operator first.
+You can install the tf-operator by applying all the YAML files from the two locations below:
+
+* CRD: https://github.com/kubeflow/manifests/tree/master/tf-training/tf-job-crds/base
+* Deployment: https://github.com/kubeflow/manifests/tree/master/tf-training/tf-job-operator/base
+
+Alternatively, you can install Kubeflow, which installs the tf-operator for you: https://www.kubeflow.org/docs/started/getting-started/
+
+A simple Tensorflow job example:
+
+You need to [build the image](https://github.com/kubeflow/tf-operator/tree/master/examples/v1/dist-mnist) that is used in the example YAML.
+```shell script
+kubectl create -f examples/tfjob/tf-job-mnist.yaml
+```
+
+The file for this example can be found in the [README Tensorflow job](https://github.com/apache/incubator-yunikorn-k8shim/tree/master/deployments/examples#Tensorflow-job) section.
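+
+For reference, a trimmed-down sketch of what such a TFJob could look like with the YuniKorn labels added is shown below;
+it is not the full example file, the image is a placeholder, and `schedulerName` is only needed when the YuniKorn
+admission controller is not installed:
+
+```yaml
+apiVersion: kubeflow.org/v1
+kind: TFJob
+metadata:
+  name: dist-mnist
+spec:
+  tfReplicaSpecs:
+    Worker:
+      replicas: 2
+      restartPolicy: Never
+      template:
+        metadata:
+          labels:
+            applicationId: "tf-job-mnist-01"
+            queue: "root.sandbox"
+        spec:
+          schedulerName: yunikorn
+          containers:
+            - name: tensorflow
+              image: <your-dist-mnist-image>
+```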
diff --git a/versioned_sidebars/version-0.9.0-sidebars.json b/versioned_sidebars/version-0.9.0-sidebars.json
new file mode 100644
index 0000000..845ed5f
--- /dev/null
+++ b/versioned_sidebars/version-0.9.0-sidebars.json
@@ -0,0 +1,177 @@
+{
+  "version-0.9.0/docs": [
+    {
+      "collapsed": true,
+      "type": "category",
+      "label": "Get Started",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-0.9.0/get_started/user_guide"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/get_started/core_features"
+        }
+      ]
+    },
+    {
+      "collapsed": true,
+      "type": "category",
+      "label": "User Guide",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-0.9.0/user_guide/queue_config"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/user_guide/placement_rules"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/user_guide/sorting_policies"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/user_guide/acls"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/user_guide/resource_quota_management"
+        },
+        {
+          "collapsed": true,
+          "type": "category",
+          "label": "Workloads",
+          "items": [
+            {
+              "type": "doc",
+              "id": "version-0.9.0/user_guide/workloads/run_spark"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/user_guide/workloads/run_flink"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/user_guide/workloads/run_tf"
+            }
+          ]
+        },
+        {
+          "collapsed": true,
+          "type": "category",
+          "label": "REST APIs",
+          "items": [
+            {
+              "type": "doc",
+              "id": "version-0.9.0/api/cluster"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/api/scheduler"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/api/system"
+            }
+          ]
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/user_guide/trouble_shooting"
+        }
+      ]
+    },
+    {
+      "collapsed": true,
+      "type": "category",
+      "label": "Developer Guide",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-0.9.0/developer_guide/env_setup"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/developer_guide/build"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/developer_guide/deployment"
+        },
+        {
+          "collapsed": true,
+          "type": "category",
+          "label": "Designs",
+          "items": [
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/architecture"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/scheduler_core_design"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/k8shim"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/cross_queue_preemption"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/namespace_resource_quota"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/pluggable_app_management"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/resilience"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/predicates"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/scheduler_configuration"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/state_aware_scheduling"
+            },
+            {
+              "type": "doc",
+              "id": "version-0.9.0/design/scheduler_object_states"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "collapsed": true,
+      "type": "category",
+      "label": "Performance",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-0.9.0/performance/evaluate_perf_function_with_kubemark"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/performance/metrics"
+        },
+        {
+          "type": "doc",
+          "id": "version-0.9.0/performance/profiling"
+        }
+      ]
+    }
+  ]
+}
diff --git a/versions.json b/versions.json
index 61fa814..86c73ac 100644
--- a/versions.json
+++ b/versions.json
@@ -1,3 +1,4 @@
 [
+  "0.9.0",
   "0.8.0"
 ]