You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@linkis.apache.org by pe...@apache.org on 2021/10/21 08:59:25 UTC

[incubator-linkis-website] 20/43: add docs image

This is an automated email from the ASF dual-hosted git repository.

peacewong pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/incubator-linkis-website.git

commit 01580dd99c39f0caf8774ef6f68e8247dd7ebc30
Author: casionone <ca...@gmail.com>
AuthorDate: Tue Oct 12 15:27:30 2021 +0800

    add docs image
---
 info.txt                                           |   5 +
 .../add_an_EngineConn_flow_chart.png               | Bin
 .../EngineConn/engineconn-01.png                   | Bin
 .../Gateway/gateway_server_dispatcher.png          | Bin
 .../Gateway/gateway_server_global.png              | Bin
 .../Gateway/gatway_websocket.png                   | Bin
 .../JobSubmission}/execution.png                   | Bin
 .../JobSubmission}/orchestrate.png                 | Bin
 .../JobSubmission}/overall.png                     | Bin
 .../JobSubmission}/physical_tree.png               | Bin
 .../JobSubmission}/result_acquisition.png          | Bin
 .../JobSubmission}/submission.png                  | Bin
 .../LabelManager/label_manager_builder.png         | Bin
 .../LabelManager/label_manager_global.png          | Bin
 .../LabelManager/label_manager_scorer.png          | Bin
 .../Linkis0.X_newengine_architecture.png}          | Bin
 .../Linkis0.X_services_list.png}                   | Bin
 .../Linkis1.0_architecture.png}                    | Bin
 .../Linkis1.0_engineconn_architecture.png}         | Bin
 .../Linkis1.0_newengine_architecture.png}          | Bin
 .../Linkis1.0_newengine_initialization.png}        | Bin
 .../Linkis1.0_services_list.png}                   | Bin
 .../ContextService/linkis-contextservice-01.png    | Bin
 .../ContextService/linkis-contextservice-02.png    | Bin
 .../linkis-contextservice-cache-01.png             | Bin
 .../linkis-contextservice-cache-02.png             | Bin
 .../linkis-contextservice-cache-03.png             | Bin
 .../linkis-contextservice-cache-04.png             | Bin
 .../linkis-contextservice-cache-05.png             | Bin
 .../linkis-contextservice-client-01.png            | Bin
 .../linkis-contextservice-client-02.png            | Bin
 .../linkis-contextservice-client-03.png            | Bin
 .../ContextService/linkis-contextservice-ha-01.png | Bin
 .../ContextService/linkis-contextservice-ha-02.png | Bin
 .../ContextService/linkis-contextservice-ha-03.png | Bin
 .../ContextService/linkis-contextservice-ha-04.png | Bin
 .../linkis-contextservice-listener-01.png          | Bin
 .../linkis-contextservice-listener-02.png          | Bin
 .../linkis-contextservice-listener-03.png          | Bin
 .../linkis-contextservice-persistence-01.png       | Bin
 .../linkis-contextservice-search-01.png            | Bin
 .../linkis-contextservice-search-02.png            | Bin
 .../linkis-contextservice-search-03.png            | Bin
 .../linkis-contextservice-search-04.png            | Bin
 .../linkis-contextservice-search-05.png            | Bin
 .../linkis-contextservice-search-06.png            | Bin
 .../linkis-contextservice-search-07.png            | Bin
 .../linkis-contextservice-service-01.png           | Bin
 .../linkis-contextservice-service-02.png           | Bin
 .../linkis-contextservice-service-03.png           | Bin
 .../linkis-contextservice-service-04.png           | Bin
 .../add_an_engineConn_flow_chart.png}              | Bin
 .../bml-02.png => architecture/bml_02.png}         | Bin
 .../linkis_engineconnplugin_01.png}                | Bin
 .../linkis_intro_01.png}                           | Bin
 .../linkis_intro_02.png}                           | Bin
 .../linkis_microservice_gov_01.png}                | Bin
 .../linkis_microservice_gov_03.png}                | Bin
 .../linkis_publicservice_01.png}                   | Bin
 .../publicenhencement_architecture.png}            | Bin
 .../docs/manual/{queue-set.png => queue_set.png}   | Bin
 .../manual/{sparksql-run.png => sparksql_run.png}  | Bin
 src/docs/architecture/AddEngineConn_en.md          | 105 +++++++++++++
 src/docs/architecture/AddEngineConn_zh.md          | 111 ++++++++++++++
 .../architecture/DifferenceBetween1.0&0.x_en.md    |  50 +++++++
 .../architecture/DifferenceBetween1.0&0.x_zh.md    |  98 ++++++++++++
 src/docs/architecture/JobSubmission_en.md          | 138 +++++++++++++++++
 src/docs/architecture/JobSubmission_zh.md          | 165 +++++++++++++++++++++
 src/docs/deploy/linkis_en.md                       |   4 +-
 src/docs/deploy/linkis_zh.md                       |   4 +-
 src/docs/manual/CliManual_en.md                    |   6 +-
 src/docs/manual/HowToUse_en.md                     |  11 +-
 src/docs/manual/HowToUse_zh.md                     |   6 +-
 src/pages/docs/architecture/AddEngineConn.vue      |  13 ++
 .../docs/architecture/DifferenceBetween1.0&0.x.vue |  13 ++
 src/pages/docs/architecture/JobSubmission.vue      |  13 ++
 src/pages/docs/index.vue                           |  18 +++
 src/router.js                                      |  19 ++-
 78 files changed, 762 insertions(+), 17 deletions(-)

diff --git a/info.txt b/info.txt
new file mode 100644
index 0000000..072fede
--- /dev/null
+++ b/info.txt
@@ -0,0 +1,5 @@
+podling网站
+http://incubator.apache.org/guides/sites.html#podling_website_requirements
+
+web url要求
+https://www.apache.org/foundation/marks/pmcs#websites
diff --git a/src/assets/docs/Architecture/Add_an_EngineConn/add_an_EngineConn_flow_chart.png b/src/assets/docs/architecture/Add_an_EngineConn/add_an_EngineConn_flow_chart.png
similarity index 100%
copy from src/assets/docs/Architecture/Add_an_EngineConn/add_an_EngineConn_flow_chart.png
copy to src/assets/docs/architecture/Add_an_EngineConn/add_an_EngineConn_flow_chart.png
diff --git a/src/assets/docs/Architecture/EngineConn/engineconn-01.png b/src/assets/docs/architecture/EngineConn/engineconn-01.png
similarity index 100%
rename from src/assets/docs/Architecture/EngineConn/engineconn-01.png
rename to src/assets/docs/architecture/EngineConn/engineconn-01.png
diff --git a/src/assets/docs/Architecture/Gateway/gateway_server_dispatcher.png b/src/assets/docs/architecture/Gateway/gateway_server_dispatcher.png
similarity index 100%
rename from src/assets/docs/Architecture/Gateway/gateway_server_dispatcher.png
rename to src/assets/docs/architecture/Gateway/gateway_server_dispatcher.png
diff --git a/src/assets/docs/Architecture/Gateway/gateway_server_global.png b/src/assets/docs/architecture/Gateway/gateway_server_global.png
similarity index 100%
rename from src/assets/docs/Architecture/Gateway/gateway_server_global.png
rename to src/assets/docs/architecture/Gateway/gateway_server_global.png
diff --git a/src/assets/docs/Architecture/Gateway/gatway_websocket.png b/src/assets/docs/architecture/Gateway/gatway_websocket.png
similarity index 100%
rename from src/assets/docs/Architecture/Gateway/gatway_websocket.png
rename to src/assets/docs/architecture/Gateway/gatway_websocket.png
diff --git a/src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/execution.png b/src/assets/docs/architecture/JobSubmission/execution.png
similarity index 100%
rename from src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/execution.png
rename to src/assets/docs/architecture/JobSubmission/execution.png
diff --git a/src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/orchestrate.png b/src/assets/docs/architecture/JobSubmission/orchestrate.png
similarity index 100%
rename from src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/orchestrate.png
rename to src/assets/docs/architecture/JobSubmission/orchestrate.png
diff --git a/src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/overall.png b/src/assets/docs/architecture/JobSubmission/overall.png
similarity index 100%
rename from src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/overall.png
rename to src/assets/docs/architecture/JobSubmission/overall.png
diff --git a/src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/physical_tree.png b/src/assets/docs/architecture/JobSubmission/physical_tree.png
similarity index 100%
rename from src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/physical_tree.png
rename to src/assets/docs/architecture/JobSubmission/physical_tree.png
diff --git a/src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/result_acquisition.png b/src/assets/docs/architecture/JobSubmission/result_acquisition.png
similarity index 100%
rename from src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/result_acquisition.png
rename to src/assets/docs/architecture/JobSubmission/result_acquisition.png
diff --git a/src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/submission.png b/src/assets/docs/architecture/JobSubmission/submission.png
similarity index 100%
rename from src/assets/docs/Architecture/Job_submission_preparation_and_execution_process/submission.png
rename to src/assets/docs/architecture/JobSubmission/submission.png
diff --git a/src/assets/docs/Architecture/LabelManager/label_manager_builder.png b/src/assets/docs/architecture/LabelManager/label_manager_builder.png
similarity index 100%
rename from src/assets/docs/Architecture/LabelManager/label_manager_builder.png
rename to src/assets/docs/architecture/LabelManager/label_manager_builder.png
diff --git a/src/assets/docs/Architecture/LabelManager/label_manager_global.png b/src/assets/docs/architecture/LabelManager/label_manager_global.png
similarity index 100%
rename from src/assets/docs/Architecture/LabelManager/label_manager_global.png
rename to src/assets/docs/architecture/LabelManager/label_manager_global.png
diff --git a/src/assets/docs/Architecture/LabelManager/label_manager_scorer.png b/src/assets/docs/architecture/LabelManager/label_manager_scorer.png
similarity index 100%
rename from src/assets/docs/Architecture/LabelManager/label_manager_scorer.png
rename to src/assets/docs/architecture/LabelManager/label_manager_scorer.png
diff --git a/src/assets/docs/Architecture/Linkis0.X-NewEngine-architecture.png b/src/assets/docs/architecture/Linkis0.X_newengine_architecture.png
similarity index 100%
rename from src/assets/docs/Architecture/Linkis0.X-NewEngine-architecture.png
rename to src/assets/docs/architecture/Linkis0.X_newengine_architecture.png
diff --git a/src/assets/docs/Architecture/Linkis0.X-services-list.png b/src/assets/docs/architecture/Linkis0.X_services_list.png
similarity index 100%
rename from src/assets/docs/Architecture/Linkis0.X-services-list.png
rename to src/assets/docs/architecture/Linkis0.X_services_list.png
diff --git a/src/assets/docs/Architecture/Linkis1.0-architecture.png b/src/assets/docs/architecture/Linkis1.0_architecture.png
similarity index 100%
rename from src/assets/docs/Architecture/Linkis1.0-architecture.png
rename to src/assets/docs/architecture/Linkis1.0_architecture.png
diff --git a/src/assets/docs/Architecture/Linkis1.0-EngineConn-architecture.png b/src/assets/docs/architecture/Linkis1.0_engineconn_architecture.png
similarity index 100%
rename from src/assets/docs/Architecture/Linkis1.0-EngineConn-architecture.png
rename to src/assets/docs/architecture/Linkis1.0_engineconn_architecture.png
diff --git a/src/assets/docs/Architecture/Linkis1.0-NewEngine-architecture.png b/src/assets/docs/architecture/Linkis1.0_newengine_architecture.png
similarity index 100%
rename from src/assets/docs/Architecture/Linkis1.0-NewEngine-architecture.png
rename to src/assets/docs/architecture/Linkis1.0_newengine_architecture.png
diff --git a/src/assets/docs/Architecture/Linkis1.0-newEngine-initialization.png b/src/assets/docs/architecture/Linkis1.0_newengine_initialization.png
similarity index 100%
rename from src/assets/docs/Architecture/Linkis1.0-newEngine-initialization.png
rename to src/assets/docs/architecture/Linkis1.0_newengine_initialization.png
diff --git a/src/assets/docs/Architecture/Linkis1.0-services-list.png b/src/assets/docs/architecture/Linkis1.0_services_list.png
similarity index 100%
rename from src/assets/docs/Architecture/Linkis1.0-services-list.png
rename to src/assets/docs/architecture/Linkis1.0_services_list.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-02.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-02.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-02.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-02.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-02.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-02.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-02.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-02.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-03.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-03.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-03.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-03.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-04.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-04.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-04.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-04.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-05.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-05.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-05.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-cache-05.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-02.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-02.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-02.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-02.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-03.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-03.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-03.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-client-03.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-02.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-02.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-02.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-02.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-03.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-03.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-03.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-03.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-04.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-04.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-04.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-ha-04.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-02.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-02.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-02.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-02.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-03.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-03.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-03.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-listener-03.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-persistence-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-persistence-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-persistence-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-persistence-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-02.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-02.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-02.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-02.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-03.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-03.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-03.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-03.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-04.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-04.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-04.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-04.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-05.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-05.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-05.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-05.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-06.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-06.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-06.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-06.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-07.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-07.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-07.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-search-07.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-01.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-01.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-01.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-01.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-02.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-02.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-02.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-02.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-03.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-03.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-03.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-03.png
diff --git a/src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-04.png b/src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-04.png
similarity index 100%
rename from src/assets/docs/Architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-04.png
rename to src/assets/docs/architecture/Public_Enhancement_Service/ContextService/linkis-contextservice-service-04.png
diff --git a/src/assets/docs/Architecture/Add_an_EngineConn/add_an_EngineConn_flow_chart.png b/src/assets/docs/architecture/add_an_engineConn_flow_chart.png
similarity index 100%
rename from src/assets/docs/Architecture/Add_an_EngineConn/add_an_EngineConn_flow_chart.png
rename to src/assets/docs/architecture/add_an_engineConn_flow_chart.png
diff --git a/src/assets/docs/Architecture/bml-02.png b/src/assets/docs/architecture/bml_02.png
similarity index 100%
rename from src/assets/docs/Architecture/bml-02.png
rename to src/assets/docs/architecture/bml_02.png
diff --git a/src/assets/docs/Architecture/linkis-engineConnPlugin-01.png b/src/assets/docs/architecture/linkis_engineconnplugin_01.png
similarity index 100%
rename from src/assets/docs/Architecture/linkis-engineConnPlugin-01.png
rename to src/assets/docs/architecture/linkis_engineconnplugin_01.png
diff --git a/src/assets/docs/Architecture/linkis-intro-01.png b/src/assets/docs/architecture/linkis_intro_01.png
similarity index 100%
rename from src/assets/docs/Architecture/linkis-intro-01.png
rename to src/assets/docs/architecture/linkis_intro_01.png
diff --git a/src/assets/docs/Architecture/linkis-intro-02.png b/src/assets/docs/architecture/linkis_intro_02.png
similarity index 100%
rename from src/assets/docs/Architecture/linkis-intro-02.png
rename to src/assets/docs/architecture/linkis_intro_02.png
diff --git a/src/assets/docs/Architecture/linkis-microservice-gov-01.png b/src/assets/docs/architecture/linkis_microservice_gov_01.png
similarity index 100%
rename from src/assets/docs/Architecture/linkis-microservice-gov-01.png
rename to src/assets/docs/architecture/linkis_microservice_gov_01.png
diff --git a/src/assets/docs/Architecture/linkis-microservice-gov-03.png b/src/assets/docs/architecture/linkis_microservice_gov_03.png
similarity index 100%
rename from src/assets/docs/Architecture/linkis-microservice-gov-03.png
rename to src/assets/docs/architecture/linkis_microservice_gov_03.png
diff --git a/src/assets/docs/Architecture/linkis-publicService-01.png b/src/assets/docs/architecture/linkis_publicservice_01.png
similarity index 100%
rename from src/assets/docs/Architecture/linkis-publicService-01.png
rename to src/assets/docs/architecture/linkis_publicservice_01.png
diff --git a/src/assets/docs/Architecture/PublicEnhencementArchitecture.png b/src/assets/docs/architecture/publicenhencement_architecture.png
similarity index 100%
rename from src/assets/docs/Architecture/PublicEnhencementArchitecture.png
rename to src/assets/docs/architecture/publicenhencement_architecture.png
diff --git a/src/assets/docs/manual/queue-set.png b/src/assets/docs/manual/queue_set.png
similarity index 100%
rename from src/assets/docs/manual/queue-set.png
rename to src/assets/docs/manual/queue_set.png
diff --git a/src/assets/docs/manual/sparksql-run.png b/src/assets/docs/manual/sparksql_run.png
similarity index 100%
rename from src/assets/docs/manual/sparksql-run.png
rename to src/assets/docs/manual/sparksql_run.png
diff --git a/src/docs/architecture/AddEngineConn_en.md b/src/docs/architecture/AddEngineConn_en.md
new file mode 100644
index 0000000..5ce15fe
--- /dev/null
+++ b/src/docs/architecture/AddEngineConn_en.md
@@ -0,0 +1,105 @@
+# How to add an EngineConn
+
+Adding EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps. First, client side (Entrance or user client) initiates a request for a new EngineConn to LinkisManager . Then LinkisManager initiates a request to EngineConnManager to start EngineConn based on demands and label rules. Finally,  LinkisManager returns the usable EngineConn to the client side.
+
+Based on the figure below, let's explain the whole process in detail:
+
+![Process of adding an EngineConn](../../assets/docs/architecture/add_an_EngineConn_flow_chart.png)
+
+## 1. LinkisManager receives the requests from client side
+
+**Glossary:**
+
+- LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:
+  1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.
+
+  2. Provide EC and ECM full life cycle management capabilities.
+
+  3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager , which can support multi-active deployment and have the characteristics of high availability and easy expansion.
+
+After the AM module receives the Client’s new EngineConn request, it first checks the parameters of the request to determine the validity of the request parameters. Secondly, selects the most suitable EngineConnManager (ECM) through complex rules for use in the subsequent EngineConn startup. Next, it will apply to RM for the resources needed to start the EngineConn, Finally, it will request the ECM to create an EngineConn.
+
+The four steps will be described in detail below.
+
+### 1. Request parameter verification
+
+After the AM module receives the engine creation request, it will check the parameters. First, it will check the permissions of the requesting user and the creating user, and then check the Label attached to the request. Since in the subsequent creation process of AM, Label will be used to find ECM and perform resource information recording, etc, you need to ensure that you have the necessary Label. At this stage, you must bring the Label with UserCreatorLabel (For example: hadoop-IDE) a [...]
+
+### 2. Select  a EngineConnManager(ECM)
+
+ECM selection is mainly to complete the Label passed through the client to select a suitable ECM service to start EngineConn. In this step, first, the LabelManager will be used to search in the registered ECM through the Label passed by the client, and return in the order according to the label matching degree. After obtaining the registered ECM list, rules will be selected for these ECMs. At this stage, rules such as availability check, resource surplus, and machine load have been imple [...]
+
+### 3. Apply resources required for EngineConn
+
+1. After obtaining the assigned ECM, AM will then request how many resources will be used by the client's engine creation request by calling the EngineConnPluginServer service. Here, the resource request will be encapsulated, mainly including Label, the EngineConn startup parameters passed by the Client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.
+
+2. After the EngineConnPluginServer service receives the resource request, it will first find the corresponding engine tag through the passed tag, and select the EngineConnPlugin of the corresponding engine through the engine tag. Then use EngineConnPlugin's resource generator to calculate the engine startup parameters passed in by the client, calculate the resources required to apply for a new EngineConn this time, and then return it to LinkisManager. 
+
+   **Glossary:**
+
+- EgineConnPlugin: It is the interface that Linkis must implement when connecting a new computing storage engine. This interface mainly includes several capabilities that this EngineConn must provide during the startup process, including EngineConn resource generator, EngineConn startup command generator, EngineConn engine connection Device. Please refer to the Spark engine implementation class for the specific implementation: [SparkEngineConnPlugin](https://github.com/WeBankFinTech/Link [...]
+- EngineConnPluginServer: It is a microservice that loads all the EngineConnPlugins and provides externally the required resource generation capabilities of EngineConn and EngineConn's startup command generation capabilities.
+- EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
+- EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
+3. After AM obtains the engine resources, it will then call the RM service to apply for resources. The RM service will use the incoming Label, ECM, and the resources applied for this time to make resource judgments. First, it will judge whether the resources of the client corresponding to the Label are sufficient, and then judge whether the resources of the ECM service are sufficient, if the resources are sufficient, the resource application is approved this time, and the resources of th [...]
+
+### 4. Request ECM for engine creation
+
+1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
+2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.
+
+## 2. ECM initiates EngineConn
+
+**Glossary:**
+
+- EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
+- EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
+- EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.
+
+After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn: 
+
+1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer. 
+2.  Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
+3.  Execute startup script to start EngineConn.
+
+### 2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest
+
+Get the EngineConn type and corresponding version that actually needs to be started through the label information of EngineConnBuildRequest, get the EngineConnPlugin of the EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into EngineConnLaunchRequest through the EngineConnLaunchBuilder of the EngineConnPlugin.
+
+### 2.2 Encapsulate EngineConn startup script
+
+After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.
+
+### 2.3 Execute startup script
+
+Currently, ECM only supports Bash commands for Unix systems, that is, only supports Linux systems to execute the startup script.
+
+Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script to ensure that the startup user (ie, JVM user) is the requesting user on the Client side.
+
+After the startup script is executed, ECM will monitor the execution status and execution log of the script in real time. Once the execution status returns to non-zero, it will immediately report EngineConn startup failure to LinkisManager and the entire process is complete; otherwise, it will keep monitoring the log and status of the startup script until the script execution is complete.
+
+## 3. EngineConn initialization
+
+After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.
+
+**Glossary:**
+
+- EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
+- EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
+- Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.
+
+The initialization of EngineConn microservices is generally divided into three stages:
+
+1. Initialize the EngineConn of the specific engine. First use the command line parameters of the Java main method to encapsulate an EngineCreationContext that contains relevant label information, startup information, and parameter information, and initialize EngineConn through EngineCreationContext to complete the establishment of the connection between EngineConn and the underlying Engine, such as: SparkEngineConn will initialize one at this stage SparkSession is used to establish a co [...]
+2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor will be initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario will initialize a series of Executors that can be used to submit and execute SQL, PySpark, and Scala code capabilities, and support the Client to submit and execute SQL, PySpark, Scala and other codes to the SparkEng [...]
+3. Report the heartbeat to LinkisManager regularly, and wait for EngineConn to exit. When the underlying engine corresponding to EngineConn is abnormal, or exceeds the maximum idle time, or Executor is executed, or the user manually kills, the EngineConn automatically ends and exits.
+
+----
+
+At this point, the process of how to add a new EngineConn is basically over. Finally, let's make a summary:
+
+- The client initiates a request for adding EngineConn to LinkisManager.
+- LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
+- ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
+- EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
+
diff --git a/src/docs/architecture/AddEngineConn_zh.md b/src/docs/architecture/AddEngineConn_zh.md
new file mode 100644
index 0000000..bb6a88f
--- /dev/null
+++ b/src/docs/architecture/AddEngineConn_zh.md
@@ -0,0 +1,111 @@
+# EngineConn新增流程
+
+EngineConn的新增,是Linkis计算治理的计算任务准备阶段的核心流程之一。它主要包括了Client端(Entrance或用户客户端)向LinkisManager发起一个新增EngineConn的请求,LinkisManager为用户按需、按标签规则,向EngineConnManager发起一个启动EngineConn的请求,并等待EngineConn启动完成后,将可用的EngineConn返回给Client的整个流程。
+
+如下图所示,接下来我们来详细说明一下整个流程:
+
+![EngineConn新增流程](../../assets/docs/architecture/add_an_EngineConn_flow_chart.png)
+
+## 一、LinkisManager接收客户端请求
+
+**名词解释**:
+
+- LinkisManager:是Linkis计算治理能力的管理中枢,主要的职责为:
+  1. 基于多级组合标签,为用户提供经过复杂路由、资源管控和负载均衡后的可用EngineConn;
+  
+  2. 提供EC和ECM的全生命周期管理能力;
+  
+  3. 为用户提供基于多级组合标签的多Yarn集群资源管理功能。主要分为 AppManager(应用管理器)、ResourceManager(资源管理器)、LabelManager(标签管理器)三大模块,能够支持多活部署,具备高可用、易扩展的特性。
+
+&nbsp;&nbsp;&nbsp;&nbsp;AM模块接收到Client的新增EngineConn请求后,首先会对请求做参数校验,判断请求参数的合法性;其次是通过复杂规则选中一台最合适的EngineConnManager(ECM),以用于后面的EngineConn启动;接下来会向RM申请启动该EngineConn需要的资源;最后是向ECM请求创建EngineConn。
+
+下面将对四个步骤进行详细说明。
+
+### 1. 请求参数校验
+
+&nbsp;&nbsp;&nbsp;&nbsp;AM模块在接受到引擎创建请求后首先会做参数判断,首先会做请求用户和创建用户的权限判断,接着会对请求带上的Label进行检查。因为在AM后续的创建流程当中,Label会用来查找ECM和进行资源信息记录等,所以需要保证拥有必须的Label,现阶段一定需要带上的Label有UserCreatorLabel(例:hadoop-IDE)和EngineTypeLabel(例:spark-2.4.3)。
+
+### 2. EngineConnManager(ECM)选择
+
+&nbsp;&nbsp;&nbsp;&nbsp;ECM选择主要是完成通过客户端传递过来的Label去选择一个合适的ECM服务去启动EngineConn。这一步中首先会通过LabelManager去通过客户端传递过来的Label去注册的ECM中进行查找,通过按照标签匹配度进行顺序返回。在获取到注册的ECM列表后,会对这些ECM进行规则选择,现阶段已经实现有可用性检查、资源剩余、机器负载等规则。通过规则选择后,会将标签最匹配、资源最空闲、负载低的ECM进行返回。
+
+### 3. EngineConn资源申请
+
+1. 在获取到分配的ECM后,AM接着会通过调用EngineConnPluginServer服务请求本次客户端的引擎创建请求会使用多少的资源,这里会通过封装资源请求,主要包含Label、Client传递过来的EngineConn的启动参数、以及从Configuration模块获取到用户配置参数,通过RPC调用ECP服务去获取本次的资源信息。
+
+2. EngineConnPluginServer服务在接收到资源请求后,会先通过传递过来的标签找到对应的引擎标签,通过引擎标签选择对应引擎的EngineConnPlugin。然后通过EngineConnPlugin的资源生成器,对客户端传入的引擎启动参数进行计算,算出本次申请新EngineConn所需的资源,然后返回给LinkisManager。
+   
+   **名词解释:**
+- EngineConnPlugin:是Linkis对接一个新的计算存储引擎必须要实现的接口,该接口主要包含了这种EngineConn在启动过程中必须提供的几个接口能力,包括EngineConn资源生成器、EngineConn启动命令生成器、EngineConn引擎连接器。具体的实现可以参考Spark引擎的实现类:[SparkEngineConnPlugin](https://github.com/WeBankFinTech/Linkis/blob/master/linkis-engineconn-plugins/engineconn-plugins/spark/src/main/scala/com/webank/wedatasphere/linkis/engineplugin/spark/SparkEngineConnPlugin.scala)。
+
+- EngineConnPluginServer:是加载了所有的EngineConnPlugin,对外提供EngineConn的所需资源生成能力和EngineConn的启动命令生成能力的微服务。
+
+- EngineConnPlugin资源生成器(EngineConnResourceFactory):通过传入的参数,计算出本次EngineConn启动时需要的总资源。
+
+- EngineConn启动命令生成器(EngineConnLaunchBuilder):通过传入的参数,生成该EngineConn的启动命令,以提供给ECM去启动引擎。
+3. AM在获取到引擎资源后,会接着调用RM服务去申请资源,RM服务会通过传入的Label、ECM、本次申请的资源,去进行资源判断。首先会判断客户端对应Label的资源是否足够,然后再会判断ECM服务的资源是否足够,如果资源足够,则本次资源申请通过,并对对应的Label进行资源的加减。
+
+### 4. 请求ECM创建引擎
+
+1. 在完成引擎的资源申请后,AM会封装引擎启动的请求,通过RPC发送给对应的ECM进行服务启动,并获取到EngineConn的实例对象;
+2. AM接着会去通过EngineConn的上报信息判断EngineConn是否启动成功变成可用状态,如果是就会将结果进行返回,本次新增引擎的流程也就结束。
+
+## 二、 ECM启动EngineConn
+
+名词解释:
+
+- EngineConnManager(ECM):EngineConn的管理器,提供引擎的生命周期管理,同时向RM汇报负载信息和自身的健康状况。
+
+- EngineConnBuildRequest:LinkisManager传递给ECM的启动引擎命令,里面封装了该引擎的所有标签信息、所需资源和一些参数配置信息。
+
+- EngineConnLaunchRequest:包含了启动一个EngineConn所需的BML物料、环境变量、ECM本地必需环境变量、启动命令等信息,让ECM可以依此构建出一个完整的EngineConn启动脚本。
+
+ECM接收到LinkisManager传递过来的EngineConnBuildRequest命令后,主要分为三步来启动EngineConn:1. 请求EngineConnPluginServer,获取EngineConnPluginServer封装出的EngineConnLaunchRequest;2. 解析EngineConnLaunchRequest,封装成EngineConn启动脚本;3. 执行启动脚本,启动EngineConn。
+
+### 2.1 EngineConnPluginServer封装EngineConnLaunchRequest
+
+通过EngineConnBuildRequest的标签信息,拿到实际需要启动的EngineConn类型和对应版本,从EngineConnPluginServer的内存中获取到该EngineConn类型的EngineConnPlugin,通过该EngineConnPlugin的EngineConnLaunchBuilder,将EngineConnBuildRequest转换成EngineConnLaunchRequest。
+
+### 2.2 封装EngineConn启动脚本
+
+ECM获取到EngineConnLaunchRequest之后,将EngineConnLaunchRequest中的BML物料下载到本地,并检查EngineConnLaunchRequest要求的本地必需环境变量是否存在,校验通过后,将EngineConnLaunchRequest封装成一个EngineConn启动脚本
+
+### 2.3 执行启动脚本
+
+目前ECM只对Unix系统做了Bash命令的支持,即只支持Linux系统执行该启动脚本。
+
+启动前,会通过sudo命令,切换到对应的请求用户去执行该脚本,确保启动用户(即JVM用户)为Client端的请求用户。
+
+执行该启动脚本后,ECM会实时监听脚本的执行状态和执行日志,一旦执行状态返回非0,则立马向LinkisManager汇报EngineConn启动失败,整个流程完成;否则则一直监听启动脚本的日志和状态,直到该脚本执行完成。
+
+## 三、EngineConn初始化
+
+ECM执行了EngineConn的启动脚本后,EngineConn微服务正式启动。
+
+名词解释:
+
+- EngineConn微服务:指包含了一个EngineConn、一个或多个Executor,用于对计算任务提供计算能力的实际微服务。我们说的新增一个EngineConn,其实指的就是新增一个EngineConn微服务。
+
+- EngineConn:引擎连接器,是与底层计算存储引擎的实际连接单元,包含了与实际引擎的会话信息。它与Executor的差别,是EngineConn只是起到一个连接、一个客户端的作用,并不真正的去执行计算。如SparkEngineConn,其会话信息为SparkSession。
+
+- Executor:执行器,作为真正的计算存储场景执行器,是实际的计算存储逻辑执行单元,对EngineConn各种能力的具体抽象,提供交互式执行、订阅式执行、响应式执行等多种不同的架构能力。
+
+EngineConn微服务的初始化一般分为三个阶段:
+
+1. 初始化具体引擎的EngineConn。先通过Java main方法的命令行参数,封装出一个包含了相关标签信息、启动信息和参数信息的EngineCreationContext,通过EngineCreationContext初始化EngineConn,完成EngineConn与底层Engine的连接建立,如:SparkEngineConn会在该阶段初始化一个SparkSession,用于与一个Spark application建立了连通关系。
+
+2. 初始化Executor。EngineConn初始化之后,接下来会根据实际的使用场景,初始化对应的Executor,为接下来的用户使用,提供服务能力。比如:交互式计算场景的SparkEngineConn,会初始化一系列可以用于提交执行SQL、PySpark、Scala代码能力的Executor,支持Client往该SparkEngineConn提交执行SQL、PySpark、Scala等代码。
+
+3. 定时向LinkisManager汇报心跳,并等待EngineConn结束退出。当EngineConn对应的底层引擎异常、或是超过最大空闲时间、或是Executor执行完成、或是用户手动kill时,该EngineConn自动结束退出。
+
+----
+
+到了这里,EngineConn的新增流程就基本结束了,最后我们再来总结一下EngineConn的新增流程:
+
+- 客户端向LinkisManager发起新增EngineConn的请求;
+
+- LinkisManager校验参数合法性,先是根据标签选择合适的ECM,再根据用户请求确认本次新增EngineConn所需的资源,向LinkisManager的RM模块申请资源,申请通过后要求ECM按要求启动一个新的EngineConn;
+
+- ECM先请求EngineConnPluginServer获取一个包含了启动一个EngineConn所需的BML物料、环境变量、ECM本地必需环境变量、启动命令等信息的EngineConnLaunchRequest,然后封装出EngineConn的启动脚本,最后执行启动脚本,启动该EngineConn;
+
+- EngineConn初始化具体引擎的EngineConn,然后根据实际的使用场景,初始化对应的Executor,为接下来的用户使用,提供服务能力。最后定时向LinkisManager汇报心跳,等待正常结束或被用户终止。
diff --git a/src/docs/architecture/DifferenceBetween1.0&0.x_en.md b/src/docs/architecture/DifferenceBetween1.0&0.x_en.md
new file mode 100644
index 0000000..8333cac
--- /dev/null
+++ b/src/docs/architecture/DifferenceBetween1.0&0.x_en.md
@@ -0,0 +1,50 @@
+## 1. Brief Description
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;First of all, the Entrance and EngineConnManager (formerly EngineManager) services under the Linkis1.0 architecture are completely unrelated to the engine. That is, under the Linkis1.0 architecture, each engine no longer needs to implement and start its corresponding Entrance and EngineConnManager, and each Entrance and EngineConnManager in Linkis1.0 can be shared by all engines.  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Secondly, Linkis1.0 added the Linkis-Manager service to provide external AppManager (application management), ResourceManager (resource management, the original ResourceManager service) and LabelManager (label management) capabilities.  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Then, in order to reduce the difficulty of implementing and deploying a new engine, Linkis 1.0 re-architects a module called EngineConnPlugin. Each new engine only needs to implement the EngineConnPlugin interface.Linkis EngineConnPluginServer supports dynamic loading of EngineConnPlugin (new engine) in the form of a plug-in. Once EngineConnPluginServer is successfully loaded, EngineConnManager can quickly start an instance of the engine fo [...]
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Finally, all the microservices of Linkis are summarized and classified, which are generally divided into three major levels: public enhancement services, computing governance services and microservice governance services, from the code hierarchy, microservice naming and installation directory structure, etc. To standardize the microservice system of Linkis1.0.  
+##  2. Main Feature
+1. **Strengthen computing governance**, Linkis 1.0 mainly strengthens the comprehensive management and control capabilities of computing governance from engine management, label management, ECM management, and resource management. It is based on the powerful management and control design concept of labeling. This makes Linkis 1.0 a solid step towards multi-IDC, multi-cluster, and multi-container.  
+2. **Simplify user implementation of new engines**, EnginePlugin is used to integrate the related interfaces and classes that need to be implemented to implement a new engine, as well as the Entrance-EngineManager-Engine three-tier module system that needs to be split, into one interface, simplifying the process and code for users to implement the new engine, so that as long as one class is implemented, a new engine can be connected.  
+3. **Full-stack computing storage engine support**, to achieve full coverage support for computing request scenarios (such as Spark), storage request scenarios (such as HBase), and resident cluster services (such as SparkStreaming).  
+4. **Improved advanced computing strategy capability**, add Orchestrator to implement rich computing task management strategies, and support tag-based analysis and orchestration.  
+## 3. Service Comparison
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Please refer to the following two pictures:  
+![Linkis0.X Service List](../../assets/docs/architecture/Linkis0.X_services_list.png)  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The list of Linkis1.0 microservices is as follows:  
+![Linkis1.0 Service List](../../assets/docs/architecture/Linkis1.0_services_list.png)  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;From the above two figures, Linkis1.0 divides services into three types of services: Computing Governance (CG)/Micro Service Governance (MG)/Public Enhanced Service (PS). among them:  
+1. A major change in computing governance is that Entrance and EngineConnManager services are no longer related to engines. To implement a new engine, only the EngineConnPlugin plug-in needs to be implemented. EngineConnPluginServer will dynamically load the EngineConnPlugin plug-in to achieve engine hot-plug update;
+2. Another major change in computing governance is that LinkisManager, as the management brain of Linkis, abstracts and defines AppManager (application management), ResourceManager (resource management) and LabelManager (label management);
+3. Microservice management service, merged and unified the Eureka and Gateway services in the 0.X part, and enhanced the functions of the Gateway service to support routing and forwarding according to Label;
+4. Public enhancement services, mainly to optimize and unify the BML services/context services/data source services/public services of the 0.X part, which is convenient for everyone to manage and view.  
+## 4. Introduction To Linkis Manager
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;As the management brain of Linkis, Linkis Manager is mainly composed of AppManager, ResourceManager and LabelManager.  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ResourceManager not only has Linkis0.X's resource management capabilities for Yarn and Linkis EngineManager, but also provides tag-based multi-level resource allocation and recycling capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types.  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;AppManager will coordinate and manage all EngineConnManager and EngineConn, and the life cycle of EngineConn application, reuse, creation, switching, and destruction will be handed over to AppManager for management.  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The LabelManager will provide cross-IDC and cross-cluster EngineConn and EngineConnManager routing and management capabilities based on multi-level combined tags.  
+## 5. Introduction To Linkis EngineConnPlugin
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;EngineConnPlugin is mainly used to reduce the cost of access and deployment of new computing storage. It truly enables users to “just need to implement a class to connect to a new computing storage engine; just execute a script to quickly deploy a new engine”.  
+### 5.1 New Engine Implementation Comparison
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The following are the relevant interfaces and classes that the user Linkis0.X needs to implement to implement a new engine:  
+![Linkis0.X How to implement a brand new engine](../../assets/docs/architecture/Linkis0.X_newengine_architecture.png)  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;The following is Linkis 1.0.0, which implements a new engine, the interfaces and classes that users need to implement:  
+![Linkis1.0 How to implement a brand new engine](../../assets/docs/architecture/Linkis1.0_newengine_architecture.png)  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Among them, EngineConnResourceFactory and EngineLaunchBuilder are not required interfaces; only EngineConnFactory is a required interface.  
+### 5.2 New engine startup process
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;EngineConnPlugin provides the Server service to start and load all engine plug-ins. The following is a new engine startup that accesses the entire process of EngineConnPlugin-Server:  
+![Linkis Engine start process](../../assets/docs/architecture/Linkis1.0_newengine_initialization.png)  
+## 6. Introduction To Linkis EngineConn
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;EngineConn, the original Engine module, is the actual unit for Linkis to connect and interact with the underlying computing storage engine, and is the basis for Linkis to provide computing and storage capabilities.  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;EngineConn of Linkis1.0 is mainly composed of EngineConn and Executor. among them:  
+
+1. EngineConn is the connector, which contains the session information between the engine and the specific cluster. It only acts as a connection, a client, and does not actually perform calculations.  
+
+2. Executor is the executor. As a real computing scene executor, it is the actual computing logic execution unit, and it also abstracts various specific capabilities of the engine, such as providing various services such as locking, access status, and log acquisition.
+
+3. Executor is created by the session information in EngineConn. An engine type can support multiple different types of computing tasks, each corresponding to the implementation of an Executor, and the computing task will be submitted to the corresponding Executor for execution.  In this way, the same engine can provide different services according to different computing scenarios. For example, the permanent engine does not need to be locked after it is started, and the one-time engine d [...]
+
+4. The advantage of using the separation of Executor and EngineConn is that it can avoid the Receiver coupling business logic, and only retains the RPC communication function. Distribute services in multiple Executor modules, and abstract them into several categories of engines: interactive computing engines, streaming engines, disposable engines, etc., which may be used, and build a unified engine framework for later expansion.
+In this way, different types of engines can respectively load the required capabilities according to their needs, which greatly reduces the redundancy of engine implementation.  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;As shown below:  
+![Linkis EngineConn Architecture diagram](../../assets/docs/architecture/Linkis1.0_engineconn_architecture.png)
diff --git a/src/docs/architecture/DifferenceBetween1.0&0.x_zh.md b/src/docs/architecture/DifferenceBetween1.0&0.x_zh.md
new file mode 100644
index 0000000..df41d45
--- /dev/null
+++ b/src/docs/architecture/DifferenceBetween1.0&0.x_zh.md
@@ -0,0 +1,98 @@
+## 1. 简述
+
+&nbsp;&nbsp;&nbsp;&nbsp;  首先,Linkis1.0 架构下的 Entrance 和 EngineConnManager(原EngineManager)服务与 **引擎** 已完全无关,即:
+                             在 Linkis1.0 架构下,每个引擎无需再配套实现并启动对应的 Entrance 和 EngineConnManager,Linkis1.0 的每个 Entrance 和 EngineConnManager 都可以给所有引擎共用。
+                          
+&nbsp;&nbsp;&nbsp;&nbsp;  其次,Linkis1.0 新增了Linkis-Manager服务用于对外提供 AppManager(应用管理)、ResourceManager(资源管理,原ResourceManager服务)和 LabelManager(标签管理)的能力。
+
+&nbsp;&nbsp;&nbsp;&nbsp;  然后,为了降低大家实现和部署一个新引擎的难度,Linkis 1.0 重新架构了一个叫 EngineConnPlugin 的模块,每个新引擎只需要实现 EngineConnPlugin 接口即可,
+Linkis EngineConnPluginServer 支持以插件的形式动态加载 EngineConnPlugin(新引擎),一旦 EngineConnPluginServer 加载成功,EngineConnManager 便可为用户快速启动一个该引擎实例。
+                          
+&nbsp;&nbsp;&nbsp;&nbsp;  最后,对Linkis的所有微服务进行了归纳分类,总体分为了三个大层次:公共增强服务、计算治理服务和微服务治理服务,从代码层级结构、微服务命名和安装目录结构等多个方面来规范Linkis1.0的微服务体系。
+
+
+##  2. 主要特点
+
+1.  **强化计算治理**,Linkis1.0主要从引擎管理、标签管理、ECM管理和资源管理等几个方面,全面强化了计算治理的综合管控能力,基于标签化的强大管控设计理念,使得Linkis1.0向多IDC化、多集群化、多容器化,迈出了坚实的一大步。
+
+2.  **简化用户实现新引擎**,EnginePlugin用于将原本实现一个新引擎,需要实现的相关接口和类,以及需要拆分的Entrance-EngineManager-Engine三层模块体系,融合到了一个接口之中,简化用户实现新引擎的流程和代码,真正做到只要实现一个类,就能接入一个新引擎。
+
+3.  **全栈计算存储引擎支持**,实现对计算请求场景(如Spark)、存储请求场景(如HBase)和常驻集群型服务(如SparkStreaming)的全面覆盖支持。
+
+4.  **高级计算策略能力改进**,新增Orchestrator实现丰富计算任务管理策略,且支持基于标签的解析和编排。
+
+5.  **安装部署改进**  优化一键安装脚本,支持容器化部署,简化用户配置。
+
+## 3. 服务对比
+
+&nbsp;&nbsp;&nbsp;&nbsp;  请参考以下两张图:
+
+&nbsp;&nbsp;&nbsp;&nbsp;  Linkis0.X 微服务列表如下:
+
+![Linkis0.X服务列表](../../assets/docs/architecture/Linkis0.X_services_list.png)
+
+&nbsp;&nbsp;&nbsp;&nbsp;  Linkis1.0 微服务列表如下:
+
+![Linkis1.0服务列表](../../assets/docs/architecture/Linkis1.0_services_list.png)
+
+&nbsp;&nbsp;&nbsp;&nbsp;  从上面两个图中看,Linkis1.0 将服务分为了三类服务:计算治理(英文缩写CG)/微服务治理(MG)/公共增强服务(PS)。其中:
+
+1. 计算治理的一大变化是,Entrance 和 EngineConnManager服务与引擎再不相关,实现一个新引擎只需实现 EngineConnPlugin插件即可,EngineConnPluginServer会动态加载 EngineConnPlugin 插件,做到引擎热插拔式更新;
+
+2. 计算治理的另一大变化是,LinkisManager作为 Linkis 的管理大脑,抽象和定义了 AppManager(应用管理)、ResourceManager(资源管理)和LabelManager(标签管理);
+
+3. 微服务治理服务,将0.X部分的Eureka和Gateway服务进行了归并统一,并对Gateway服务进行了功能增强,支持按照Label进行路由转发;
+
+4. 公共增强服务,主要将0.X部分的BML服务/上下文服务/数据源服务/公共服务进行了优化和归并统一,便于大家管理和查看。
+
+## 4. Linkis Manager简介
+
+&nbsp;&nbsp;&nbsp;&nbsp;  Linkis Manager 作为 Linkis 的管理大脑,主要由 AppManager、ResourceManager 和 LabelManager 组成。
+
+&nbsp;&nbsp;&nbsp;&nbsp;  ResourceManager 不仅具备 Linkis0.X 对 Yarn 和 Linkis EngineManager 的资源管理能力,还将提供基于标签的多级资源分配和回收能力,让 ResourceManager 具备跨集群、跨计算资源类型的全资源管理能力;
+
+&nbsp;&nbsp;&nbsp;&nbsp;  AppManager 将统筹管理所有的 EngineConnManager 和 EngineConn,EngineConn 的申请、复用、创建、切换、销毁等生命周期全交予 AppManager进行管理;
+
+&nbsp;&nbsp;&nbsp;&nbsp;  而 LabelManager 将基于多级组合标签,提供跨IDC、跨集群的 EngineConn 和 EngineConnManager 路由和管控能力;
+
+## 5. Linkis EngineConnPlugin简介
+
+&nbsp;&nbsp;&nbsp;&nbsp;  EngineConnPlugin 主要用于降低新计算存储的接入和部署成本,真正做到让用户“只需实现一个类,就能接入一个全新计算存储引擎;只需执行一下脚本,即可快速部署一个全新引擎”。
+
+### 5.1 新引擎实现对比
+
+&nbsp;&nbsp;&nbsp;&nbsp;  以下是用户Linkis0.X实现一个新引擎需要实现的相关接口和类:
+
+![Linkis0.X 如何实现一个全新引擎](../../assets/docs/architecture/Linkis0.X_newengine_architecture.png)
+
+&nbsp;&nbsp;&nbsp;&nbsp;  以下为Linkis1.0.0,实现一个新引擎,用户需实现的接口和类:
+
+![Linkis1.0 如何实现一个全新引擎](../../assets/docs/architecture/Linkis1.0_newengine_architecture.png)
+
+&nbsp;&nbsp;&nbsp;&nbsp;  其中EngineConnResourceFactory和EngineLaunchBuilder为非必需实现接口,只有EngineConnFactory为必需实现接口。
+
+### 5.2 新引擎启动流程
+
+&nbsp;&nbsp;&nbsp;&nbsp;  EngineConnPlugin 提供了 Server 服务,用于启动和加载所有的引擎插件,以下给出了一个新引擎启动,访问了 EngineConnPlugin-Server 的全部流程:
+
+![Linkis 引擎启动流程](../../assets/docs/architecture/Linkis1.0_newengine_initialization.png)
+
+## 6. Linkis EngineConn简介
+
+&nbsp;&nbsp;&nbsp;&nbsp;  EngineConn,即原 Engine 模块,作为 Linkis 与底层计算存储引擎进行连接和交互的实际单元,是 Linkis 提供计算存储能力的基础。
+
+&nbsp;&nbsp;&nbsp;&nbsp;  Linkis1.0 的 EngineConn 主要由 EngineConn 和 Executor构成。其中:
+
+a)	EngineConn 为连接器,包含引擎与具体集群的会话信息。它只是起到一个连接,一个客户端的作用,并不真正的去执行计算。
+
+b)	Executor 为执行器,作为真正的计算场景执行器,是实际的计算逻辑执行单元,也对引擎各种具体能力的抽象,例如提供加锁、访问状态、获取日志等多种不同的服务。
+
+c)	Executor 通过 EngineConn 中的会话信息进行创建,一个引擎类型可以支持多种不同种类的计算任务,每种对应一个 Executor 的实现,计算任务将被提交到对应的 Executor 进行执行。
+这样,同一个引擎能够根据不同的计算场景提供不同的服务。比如常驻式引擎启动后不需要加锁,一次性引擎启动后不需要支持 Receiver 和访问状态等。
+
+d)	采用 Executor 和 EngineConn 分离的方式的好处是,可以避免 Receiver 耦合业务逻辑,本身只保留 RPC 通信功能。将服务分散在多个 Executor 模块中,并且抽象成几大类引擎:交互式计算引擎、流式引擎、一次性引擎等等可能用到的,构建成统一的引擎框架,便于后期的扩充。
+这样不同类型引擎可以根据需要分别加载其中需要的能力,大大减少引擎实现的冗余。
+
+&nbsp;&nbsp;&nbsp;&nbsp;  如下图所示:
+
+![Linkis EngineConn架构图](../../assets/docs/architecture/Linkis1.0_engineconn_architecture.png)
diff --git a/src/docs/architecture/JobSubmission_en.md b/src/docs/architecture/JobSubmission_en.md
new file mode 100644
index 0000000..13c70f1
--- /dev/null
+++ b/src/docs/architecture/JobSubmission_en.md
@@ -0,0 +1,138 @@
+# Job submission, preparation and execution process
+
+The submission and execution of computing tasks (Job) is the core capability provided by Linkis. It connects with almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.
+
+The whole process, starting at submitting user's computing tasks from the client and ending with returning final results, is divided into three stages: submission -> preparation -> executing. The details are shown in the following figure.
+
+![The overall flow chart of computing tasks](../../assets/docs/architecture/JobSubmission/overall.png)
+
+Among them:
+
+- Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.
+- Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.
+- Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:
+
+  1. ResourceManager:Not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types;
+  2. AppManager:  Coordinate and manage all EngineConnManager and EngineConn, including the life cycle of EngineConn application, reuse, creation, switching, and destruction to AppManager for management;
+  3. LabelManager: Based on multi-level combined labels, it will provide label support for the routing and management capabilities of EngineConn and EngineConnManager across IDC and across clusters;
+  4. EngineConnPluginServer: Externally provides the resource generation capabilities required to start an EngineConn and EngineConn startup command generation capabilities.
+- EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
+- EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.
+
+## 1. Submission Stage
+
+The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:
+
+![Flow chart of submission phase](../../assets/docs/architecture/JobSubmission/submission.png)
+
+1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to [How to use Linkis](#/docs/manual/HowToUse)):
+```
+POST /api/rest_j/v1/entrance/submit
+```
+
+```json
+{
+    "executionContent": {"code": "show tables", "runType": "sql"},
+    "params": {"variable": {}, "configuration": {}},  //非必须
+    "source": {"scriptPath": "file:///1.hql"}, //非必须,仅用于记录代码来源
+    "labels": {
+        "engineType": "spark-2.4.3",  //指定引擎
+        "userCreator": "johnnwnag-IDE"  // 指定提交用户和提交系统
+    }
+}
+```
+
+2. After Linkis-Gateway receives the request, according to the serviceName in the URI ``/api/rest_j/v1/${serviceName}/.+``, it will confirm the microservice name for routing and forwarding. Here Linkis-Gateway will parse out the  name as entrance and  Job is forwarded to the Entrance microservice. It should be noted that if the user specifies a routing label, the Entrance microservice instance with the corresponding label will be selected for forwarding according to the routing label ins [...]
+3. After Entrance receives the Job request, it will first simply verify the legitimacy of the request, then use RPC to call JobHistory to persist the job information, and then encapsulate the Job request as a computing task, put it in the scheduling queue, and wait for it to be consumed by consumption thread.
+4. The scheduling queue will open up a consumption queue and a consumption thread for each group. The consumption queue is used to store the user computing tasks that have been preliminarily encapsulated. The consumption thread will continue to take computing tasks from the consumption queue for consumption in a FIFO manner. The current default grouping method is Creator + User (that is, submission system + user). Therefore, even if it is the same user, as long as it is a computing task  [...]
+5. After the consuming thread takes out the calculation task, it will submit the calculation task to Orchestrator, which officially enters the preparation phase.
+
+## 2. Preparation Stage
+
+There are two main processes in the preparation phase. One is to apply for an available EngineConn from LinkisManager to submit and execute the following computing tasks. The other is Orchestrator to orchestrate the computing tasks submitted by Entrance, and to convert a user's computing request into a physical execution tree, which is then handed over to the execution phase where the computing task is actually executed.
+
+#### 2.1 Apply to LinkisManager for available EngineConn
+
+If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.
+
+How to define a reusable EngineConn? It refers to those that can match all the label requirements of the computing task, and the EngineConn's own health status is Healthy (the load is low and the actual status is Idle). Then, all the EngineConn that meets the conditions are sorted and selected according to the rules, and finally the best one is locked.
+
+If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: [How to add an EngineConn](/#/docs/architecture/AddEngineConn).
+
+#### 2.2 Orchestrate a computing task
+
+Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.
+
+Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:
+
+![Orchestration flow chart](../../assets/docs/architecture/JobSubmission/orchestrate.png)
+
+The main process is as follows:
+
+- Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
+- Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
+- Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
+- Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
+- Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.
+
+In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:
+
+![Physical Tree](../../assets/docs/architecture/JobSubmission/physical_tree.png)
+
+Different computing strategies have different execution logics encapsulated by JobExecTask and StageExecTask in the Physical tree.
+
+The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the  specific type of computing strategy.
+
+For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.
+
+Here, we take the multi-reading scenario under the multi-active computing strategy as an example.
+
+In multi-reading scenario, only one result of ExecTask is required to return. Once the result is returned , the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies, and cannot terminate the execution of each node. Once a node is canceled or fails to execute, the entire Physical tree will be marked as failure. At this time, StageExecTask (End) is needed to ensure that the Physical tree can not only ca [...]
+
+The orchestration process of Linkis Orchestrator is similar to many SQL parsing engines (such as Spark, Hive's SQL parser). But in fact, the orchestration capability of Linkis Orchestrator is realized based on the computing governance field for the different computing governance needs of users. The SQL parsing engine is a parsing orchestration oriented to the SQL language. Here is a simple distinction:
+
+1. What Linkis Orchestrator mainly wants to solve is the orchestration requirements caused by different computing tasks for computing strategies. For example, in order to be multi-active, Orchestrator will submit a calculation task for the user, based on the "multi-active" computing strategy requirements, compile a physical tree, so as to submit to multiple clusters to perform this calculation task. And in the process of constructing the entire Physical tree, various possible abnormal sc [...]
+2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has adapted to Linkis, all the programming languages it supports can be orchestrated, while the SQL parsing engine only cares about the analysis and execution of SQL, and is only responsible for parsing a piece of SQL into one executable Physical tree, and finally calculate the result.
+3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL. It supports splitting a user SQL that spans multiple computing engines (must be a computing engine that Linkis has docked) into multiple sub SQLs and submitting them to each corresponding engine during the execution phase. Finally,  [...]
+
+Please refer to [Orchestrator Architecture Design](https://github.com/WeBankFinTech/Linkis-Doc/blob/master/en_US/Architecture_Documents/Orchestrator/Orchestrator_architecture_doc.md) for more details. 
+
+After the analysis and arrangement of Linkis Orchestrator, the  computing task has been transformed into a executable physical tree. Orchestrator will submit the Physical tree to Orchestrator's Execution module and enter the final execution stage.
+
+## 3. Execution Stage
+
+The execution stage is mainly divided into the following two steps, these two steps are the last two phases of capabilities provided by Linkis Orchestrator:
+
+![Flow chart of the execution stage](../../assets/docs/architecture/JobSubmission/execution.png)
+
+The main process is as follows:
+
+- Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
+- Reheater: Once the execution of a node in the Physical tree is completed, it will trigger a reheat. Reheating allows the physical tree to be dynamically adjusted according to the real-time execution.For example: it is detected that a leaf node fails to execute, and it supports retry (if it is caused by throwing ReTryExecption), the Physical tree will be automatically adjusted, and a retry parent node with exactly the same content is added to the leaf node .
+
+Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.
+
+1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
+2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
+3. After ExecTask gets this execution ID, it can then use this ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
+4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
+5. EngineConn will pull the execution status back to the microservice where Orchestrator is located in real time through RPC request.
+6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
+7. The result set generated by the calculation task will be written to storage media such as HDFS at the EngineConn side. EngineConn returns only the result set path through RPC, Execution consumes the event, and broadcasts the obtained result set path through ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the result set path and write the result set path Persist to JobHistory.
+8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
+9. After the Entrance registered Listener with the Orchestrator consumes the state event, it updates the job state to JobHistory, and the entire task execution is completed.
+
+----
+
+Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:
+
+![Results acquisition process](../../assets/docs/architecture/JobSubmission/result_acquisition.png)
+
+The specific process is as follows:
+
+1. The client periodically polls to request Entrance to obtain the status of the computing task.
+2. Once the status is flipped to success, it sends a request for job information to JobHistory, and gets all the result set paths.
+3. Initiate a query file content request to PublicService through the result set path, and obtain the content of the result set.
+
+Since then, the entire process of  job submission -> preparation -> execution have been completed.
+
diff --git a/src/docs/architecture/JobSubmission_zh.md b/src/docs/architecture/JobSubmission_zh.md
new file mode 100644
index 0000000..28ffaa4
--- /dev/null
+++ b/src/docs/architecture/JobSubmission_zh.md
@@ -0,0 +1,165 @@
+# JobSubmission
+
+计算任务(Job)的提交执行是Linkis提供的核心能力,它几乎串通了Linkis计算治理架构中的所有模块,在Linkis之中占据核心地位。
+
+我们将用户的计算任务从客户端提交开始,到最后的返回结果为止,整个流程分为三个阶段:提交 -> 准备 -> 执行,如下图所示:
+
+![计算任务整体流程图](../../assets/docs/architecture/JobSubmission/overall.png)
+
+其中:
+
+- Entrance作为提交阶段的入口,提供任务的接收、调度和Job信息的转发能力,是所有计算型任务的统一入口,它将把计算任务转发给Orchestrator进行编排和执行;
+
+- Orchestrator作为准备阶段的入口,主要提供了Job的解析、编排和执行能力。
+
+- Linkis Manager:是计算治理能力的管理中枢,主要的职责为:
+  
+  1. ResourceManager:不仅具备对Yarn和Linkis EngineConnManager的资源管理能力,还将提供基于标签的多级资源分配和回收能力,让ResourceManager具备跨集群、跨计算资源类型的全资源管理能力;
+  
+  2. AppManager:统筹管理所有的EngineConnManager和EngineConn,包括EngineConn的申请、复用、创建、切换、销毁等生命周期全交予AppManager进行管理;
+  
+  3. LabelManager:将基于多级组合标签,为跨IDC、跨集群的EngineConn和EngineConnManager路由和管控能力提供标签支持;
+  
+  4. EngineConnPluginServer:对外提供启动一个EngineConn的所需资源生成能力和EngineConn的启动命令生成能力。
+
+- EngineConnManager:是EngineConn的管理器,提供引擎的生命周期管理,同时向RM汇报负载信息和自身的健康状况。
+
+- EngineConn:是Linkis与底层计算存储引擎的实际连接器,用户所有的计算存储任务最终都会交由EngineConn提交给底层计算存储引擎。根据用户的不同使用场景,EngineConn提供了交互式计算、流式计算、离线计算、数据存储任务的全栈计算能力框架支持。
+
+接下来,我们将详细介绍计算任务从 提交 -> 准备 -> 执行 的三个阶段。
+
+## 一、提交阶段
+
+提交阶段主要是Client端 -> Linkis Gateway -> Entrance的交互,其流程如下:
+
+![提交阶段流程图](../../assets/docs/architecture/JobSubmission/submission.png)
+
+1. 首先,Client(如前端或客户端)发起Job请求,Job请求信息精简如下(关于Linkis的具体使用方式,请参考 [如何使用Linkis](/#/docs/manual/HowToUse)):
+
+```
+POST /api/rest_j/v1/entrance/submit
+```
+
+```json
+{
+    "executionContent": {"code": "show tables", "runType": "sql"},
+    "params": {"variable": {}, "configuration": {}},  //非必须
+    "source": {"scriptPath": "file:///1.hql"}, //非必须,仅用于记录代码来源
+    "labels": {
+        "engineType": "spark-2.4.3",  //指定引擎
+        "userCreator": "johnnwnag-IDE"  // 指定提交用户和提交系统
+    }
+}
+```
+
+2. Linkis-Gateway接收到请求后,根据URI ``/api/rest_j/v1/${serviceName}/.+``中的serviceName,确认路由转发的微服务名,这里Linkis-Gateway会解析出微服务名为entrance,将Job请求转发给Entrance微服务。需要说明的是:如果用户指定了路由标签,则在转发时,会根据路由标签选择打了相应标签的Entrance微服务实例进行转发,而不是随机转发。
+
+3. Entrance接收到Job请求后,会先简单校验请求的合法性,然后通过RPC调用JobHistory对Job的信息进行持久化,然后将Job请求封装为一个计算任务,放入到调度队列之中,等待被消费线程消费。
+
+4. 调度队列会为每个组开辟一个消费队列 和 一个消费线程,消费队列用于存放已经初步封装的用户计算任务,消费线程则按照FIFO的方式,不断从消费队列中取出计算任务进行消费。目前默认的分组方式为 Creator + User(即提交系统 + 用户),因此,即便是同一个用户,只要是不同的系统提交的计算任务,其实际的消费队列和消费线程都完全不同,完全隔离互不影响。(温馨提示:用户可以按需修改分组算法)
+
+5. 消费线程取出计算任务后,会将计算任务提交给Orchestrator,由此正式进入准备阶段。
+
+## 二、 准备阶段
+
+准备阶段主要有两个流程,一是向LinkisManager申请一个可用的EngineConn,用于接下来的计算任务提交执行,二是Orchestrator对Entrance提交过来的计算任务进行编排,将一个用户计算请求,通过编排转换成一个物理执行树,然后交给第三阶段的执行阶段去真正提交执行。
+
+#### 2.1 向LinkisManager申请可用EngineConn
+
+如果在LinkisManager中,该用户存在可复用的EngineConn,则直接锁定该EngineConn,并返回给Orchestrator,整个申请流程结束。
+
+如何定义可复用EngineConn?指能匹配计算任务的所有标签要求的,且EngineConn本身健康状态为Healthy(负载低且实际EngineConn状态为Idle)的,然后再按规则对所有满足条件的EngineConn进行排序选择,最终锁定一个最佳的EngineConn。
+
+如果该用户不存在可复用的EngineConn,则此时会触发EngineConn新增流程,关于EngineConn新增流程,请参考:[EngineConn新增流程](#/docs/architecture/AddEngineConn) 。
+
+#### 2.2 计算任务编排
+
+Orchestrator主要负责将一个计算任务(JobReq),编排成一棵可以真正执行的物理执行树(PhysicalTree),并提供Physical树的执行能力。
+
+这里先重点介绍Orchestrator的计算任务编排能力,如下图:
+
+![编排流程图](../../assets/docs/architecture/JobSubmission/orchestrate.png)
+
+其主要流程如下:
+
+- Converter(转换):完成对用户提交的JobReq(任务请求)转换为Orchestrator的ASTJob,该步骤会对用户提交的计算任务进行参数检查和信息补充,如变量替换等;
+
+- Parser(解析):完成对ASTJob的解析,将ASTJob拆成由ASTJob和ASTStage组成的一棵AST树。
+
+- Validator(校验): 完成对ASTJob和ASTStage的检验和信息补充,如代码检查、必须的Label信息补充等。
+
+- Planner(计划):将一棵AST树转换为一棵Logical树。此时的Logical树已经由LogicalTask组成,包含了整个计算任务的所有执行逻辑。
+
+- Optimizer(优化阶段):将一棵Logical树转换为Physical树,并对Physical树进行优化。
+
+一棵Physical树,其中的很多节点都是计算策略逻辑,只有中间的ExecTask,才真正封装了将用户计算任务提交给EngineConn进行提交执行的执行逻辑。如下图所示:
+
+![Physical树](../../assets/docs/architecture/JobSubmission/physical_tree.png)
+
+不同的计算策略,其Physical树中的JobExecTask 和 StageExecTask所封装的执行逻辑各不相同。
+
+如多活计算策略下,用户提交的一个计算任务,其提交给不同集群的EngineConn进行执行的执行逻辑封装在了两个ExecTask中,而相关的多活策略逻辑则体现在了两个ExecTask的父节点StageExecTask(End)之中。
+
+这里举多活计算策略下的多读场景。
+
+多读时,实际只要求一个ExecTask返回结果,该Physical树就可以标记为执行成功并返回结果了,但Physical树只具备按依赖关系进行依次执行的能力,无法终止某个节点的执行,且一旦某个节点被取消执行或执行失败,则整个Physical树其实会被标记为执行失败,这时就需要StageExecTask(End)来做一些特殊的处理,来保证既可以取消另一个ExecTask,又能把执行成功的ExecTask所产生的结果集继续往上传,让Physical树继续往上执行。这就是StageExecTask所代表的计算策略执行逻辑。
+
+Linkis Orchestrator的编排流程与很多SQL解析引擎(如Spark、Hive的SQL解析器)存在相似的地方,但实际上,Linkis Orchestrator是面向计算治理领域针对用户不同的计算治理需求,而实现的解析编排能力,而SQL解析引擎是面向SQL语言的解析编排。这里做一下简单区分:
+
+1. Linkis Orchestrator主要想解决的,是不同计算任务对计算策略所引发出的编排需求。如:用户想具备多活的能力,则Orchestrator会为用户提交的一个计算任务,基于“多活”的计算策略需求,编排出一棵Physical树,从而做到往多个集群去提交执行这个计算任务,并且在构建整个Physical树的过程中,已经充分考虑了各种可能存在的异常场景,并都已经体现在了Physical树中。
+
+2. Linkis Orchestrator的编排能力与编程语言无关,理论上只要是Linkis已经对接的引擎,其支持的所有编程语言都支持编排;而SQL解析引擎只关心SQL的解析和执行,只负责将一条SQL解析成一颗可执行的Physical树,最终计算出结果。
+
+3. Linkis Orchestrator也具备对SQL的解析能力,但SQL解析只是Orchestrator Parser针对SQL这种编程语言的其中一种解析实现。Linkis Orchestrator的Parser也考虑引入Apache Calcite对SQL进行解析,支持将一条跨多个计算引擎(必须是Linkis已经对接的计算引擎)的用户SQL,拆分成多条子SQL,在执行阶段时分别提交给对应的计算引擎进行执行,最后选择一个合适的计算引擎进行汇总计算。
+
+关于Orchestrator的编排详细介绍,请参考:[Orchestrator架构设计](https://github.com/WeBankFinTech/Linkis-Doc/blob/master/zh_CN/Architecture_Documents/Orchestrator/Orchestrator_architecture_doc.md)
+
+经过了Linkis Orchestrator的解析编排后,用户的计算任务已经转换成了一颗可被执行的Physical树。Orchestrator会将该Physical树提交给Orchestrator的Execution模块,进入最后的执行阶段。
+
+## 三、执行阶段
+
+执行阶段主要分为如下两步,这两步是Linkis Orchestrator提供的最后两阶段的能力:
+
+![执行阶段流程图](../../assets/docs/architecture/JobSubmission/execution.png)
+
+其主要流程如下:
+
+- Execution(执行):解析Physical树的依赖关系,按照依赖从叶子节点开始依次执行。
+
+- Reheater(再热):一旦Physical树有节点执行完成,都会触发一次再热。再热允许依照Physical树的实时执行情况,动态调整Physical树,继续进行执行。如:检测到某个叶子节点执行失败,且该叶子节点支持重试(如失败原因是抛出了ReTryExecption),则自动调整Physical树,在该叶子节点上面添加一个内容完全相同的重试父节点。
+
+我们回到Execution阶段,这里重点介绍封装了将用户计算任务提交给EngineConn的ExecTask节点的执行逻辑。
+
+1. 前面有提到,准备阶段的第一步,就是向LinkisManager获取一个可用的EngineConn,ExecTask拿到这个EngineConn后,会通过RPC请求,将用户的计算任务提交给EngineConn。
+
+2. EngineConn接收到计算任务之后,会通过线程池异步提交给底层的计算存储引擎,然后马上返回一个执行ID。
+
+3. ExecTask拿到这个执行ID后,后续可以通过该执行ID异步去拉取计算任务的执行情况(如:状态、进度、日志、结果集等)。
+
+4. 同时,EngineConn会通过注册的多个Listener,实时监听底层计算存储引擎的执行情况。如果该计算存储引擎不支持注册Listener,则EngineConn会为计算任务启动守护线程,定时向计算存储引擎拉取执行情况。
+
+5. EngineConn将拉取到的执行情况,通过RPC请求,实时传回Orchestrator所在的微服务。
+
+6. 该微服务的Receiver接收到执行情况后,会通过ListenerBus进行广播,Orchestrator的Execution消费该事件并动态更新Physical树的执行情况。
+
+7. 计算任务所产生的结果集,会在EngineConn端就写入到HDFS等存储介质之中。EngineConn通过RPC传回的只是结果集路径,Execution消费事件,并将获取到的结果集路径通过ListenerBus进行广播,使Entrance向Orchestrator注册的Listener能消费到该结果集路径,并将结果集路径写入持久化到JobHistory之中。
+
+8. EngineConn端的计算任务执行完成后,通过同样的逻辑,会触发Execution更新Physical树该ExecTask节点的状态,使得Physical树继续往上执行,直到整棵树全部执行完成。这时Execution会通过ListenerBus广播计算任务执行完成的状态。
+
+9. Entrance向Orchestrator注册的Listener消费到该状态事件后,向JobHistory更新Job的状态,整个任务执行完成。
+
+----
+
+最后,我们再来看下Client端是如何得知计算任务状态变化,并及时获取到计算结果的,具体如下图所示:
+
+![结果获取流程](../../assets/docs/architecture/JobSubmission/result_acquisition.png)
+
+具体流程如下:
+
+1. Client端定时轮询请求Entrance,获取计算任务的状态。
+
+2. 一旦发现状态翻转为成功,则向JobHistory发送获取Job信息的请求,拿到所有的结果集路径
+
+3. 通过结果集路径向PublicService发起查询文件内容的请求,获取到结果集的内容。
+
+自此,整个Job的提交 -> 准备 -> 执行 三个阶段全部完成。
diff --git a/src/docs/deploy/linkis_en.md b/src/docs/deploy/linkis_en.md
index 7cfe807..dd17e01 100644
--- a/src/docs/deploy/linkis_en.md
+++ b/src/docs/deploy/linkis_en.md
@@ -2,9 +2,9 @@
 
 ## Notes
 
-If you are new to Linkis, you can ignore this chapter, however, if you are already a Linkis user,  we recommend you reading the following article before installing or upgrading: [Brief introduction of the difference between Linkis1.0 and Linkis0.X](https://github.com/WeBankFinTech/Linkis-Doc/blob/master/en_US/Architecture_Documents/DifferenceBetween1.0%260.x.md).
+If you are new to Linkis, you can ignore this chapter, however, if you are already a Linkis user,  we recommend you reading the following article before installing or upgrading: [Brief introduction of the difference between Linkis1.0 and Linkis0.X](#/docs/architecture/DifferenceBetween1.0&0.x).
 
-Please note: Apart from the four EngineConnPlugins included in the Linkis1.0 installation package by default: Python/Shell/Hive/Spark. You can manually install other types of engines such as JDBC depending on your own needs. For details, please refer to EngineConnPlugin installation documents.
+Please note: Apart from the four EngineConnPlugins included in the Linkis1.0 installation package by default: Python/Shell/Hive/Spark. You can manually install other types of engines such as JDBC depending on your own needs. For details, please refer to EngineConnPlugin installation documents [EngineConnPlugin installation documents](#/docs/deploy/engins).
 
 Engines that Linkis1.0 has adapted by default are listed below:
 
diff --git a/src/docs/deploy/linkis_zh.md b/src/docs/deploy/linkis_zh.md
index e1c1fb6..02e50bd 100644
--- a/src/docs/deploy/linkis_zh.md
+++ b/src/docs/deploy/linkis_zh.md
@@ -1,8 +1,8 @@
 ## 注意事项
 
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;**如果您是首次接触并使用Linkis,您可以忽略该章节;如果您已经是 Linkis 的使用用户,安装或升级前建议先阅读:[Linkis1.0 与 Linkis0.X 的区别简述](https://github.com/WeBankFinTech/Linkis-Doc/blob/master/zh_CN/Architecture_Documents/Linkis1.0%E4%B8%8ELinkis0.X%E7%9A%84%E5%8C%BA%E5%88%AB%E7%AE%80%E8%BF%B0.md)**。
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;**如果您是首次接触并使用Linkis,您可以忽略该章节;如果您已经是 Linkis 的使用用户,安装或升级前建议先阅读:[Linkis1.0 与 Linkis0.X 的区别简述](#/docs/architecture/DifferenceBetween1.0&0.x)**。
 
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;请注意:除了 Linkis1.0 安装包默认已经包含的:Python/Shell/Hive/Spark四个EngineConnPlugin以外,如果大家有需要,可以手动安装如 JDBC 引擎等类型的其他引擎,具体请参考 [EngineConnPlugin引擎插件安装文档](https://github.com/WeBankFinTech/Linkis-Doc/blob/master/zh_CN/Deployment_Documents/EngineConnPlugin%E5%BC%95%E6%93%8E%E6%8F%92%E4%BB%B6%E5%AE%89%E8%A3%85%E6%96%87%E6%A1%A3.md)。
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;请注意:除了 Linkis1.0 安装包默认已经包含的:Python/Shell/Hive/Spark四个EngineConnPlugin以外,如果大家有需要,可以手动安装如 JDBC 引擎等类型的其他引擎,具体请参考 [EngineConnPlugin引擎插件安装文档](#/docs/deploy/engins)。
 
 **Linkis Docker镜像**  
 [Linkis 0.10.0 Docker](https://hub.docker.com/repository/docker/wedatasphere/linkis)
diff --git a/src/docs/manual/CliManual_en.md b/src/docs/manual/CliManual_en.md
index 0aa70c7..e6523ce 100644
--- a/src/docs/manual/CliManual_en.md
+++ b/src/docs/manual/CliManual_en.md
@@ -40,7 +40,7 @@ Linkis-cli currently only supports synchronous submission, that is, after submit
 * cli parameters
 
     | Parameter | Description | Data Type | Is Required |
-    | ----------- | -------------------------- | -------- |- --- |
+    | ----------- | -------------------------- | -------- |---- |
     | --gwUrl | Manually specify the linkis gateway address | String | No |
     | --authStg | Specify authentication policy | String | No |
     | --authKey | Specify authentication key | String | No |
@@ -50,7 +50,9 @@ Linkis-cli currently only supports synchronous submission, that is, after submit
 * Parameters
 
     | Parameter | Description | Data Type | Is Required |
-    | ----------- | -------------------------- | -------- |- --- |
+    | ----------- | -------------------------- | -------- |---- |
+    | Parameter      | Description                     | Data Type  | Is Required  |
+    | ----------- | -------------------------- | -------- | ---- |
     | -engType | Engine Type | String | Yes |
     | -runType | Execution Type | String | Yes |
     | -code | Execution code | String | No |
diff --git a/src/docs/manual/HowToUse_en.md b/src/docs/manual/HowToUse_en.md
index f450297..506c533 100644
--- a/src/docs/manual/HowToUse_en.md
+++ b/src/docs/manual/HowToUse_en.md
@@ -5,10 +5,9 @@
 ## 1. Client side usage
 
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;If you need to connect to other applications on the basis of Linkis, you need to develop the interface provided by Linkis. Linkis provides a variety of client access interfaces. For detailed usage introduction, please refer to the following:
--[**Restful API Usage**](./../API_Documentations/Linkis task submission and execution RestAPI document.md)
--[**JDBC API Usage**](./../API_Documentations/Task Submit and Execute JDBC_API Document.md)
--[**How ​​to use Java SDK**](./../User_Manual/Linkis1.0 user use document.md)
-
+- [**Restful API Usage**](./../API_Documentations/Linkis任务提交执行RestAPI文档.md)
+- [**JDBC API Usage**](./../API_Documentations/任务提交执行JDBC_API文档.md)
+- [**How ​​to use Java SDK**](#/docs/manual/UserManual)
 ## 2. Scriptis uses Linkis
 
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;If you need to use Linkis to complete interactive online analysis and processing, and you do not need data analysis application tools such as workflow development, workflow scheduling, data services, etc., you can Install [**Scriptis**](https://github.com/WeBankFinTech/Scriptis) separately. For detailed installation tutorial, please refer to its corresponding installation and deployment documents.
@@ -16,12 +15,12 @@
 ## 2.1. Use Scriptis to execute scripts
 
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Currently Scriptis supports submitting a variety of task types to Linkis, including Spark SQL, Hive SQL, Scala, PythonSpark, etc. In order to meet the needs of data analysis, the left side of Scriptis, Provides viewing user workspace information, user database and table information, user-defined functions, and HDFS directories. It also supports uploading and downloading, result set exporting and other functions. Scriptis is very simple to u [...]
-![Scriptis uses Linkis](../../assets/docs/manual/sparksql-run.png)
+![Scriptis uses Linkis](../../assets/docs/manual/sparksql_run.png)
 
 ## 2.2. Scriptis Management Console
 
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Linkis provides an interface for resource configuration and management. If you want to configure and manage task resources, you can set it on the Scriptis management console interface, including queue settings and resource configuration , The number of engine instances, etc. Through the management console, you can easily configure the resources for submitting tasks to Linkis, making it more convenient and faster.
-![Scriptis uses Linkis](../../assets/docs/manual/queue-set.png)
+![Scriptis uses Linkis](../../assets/docs/manual/queue_set.png)
 
 ## 3. DataSphere Studio uses Linkis
 
diff --git a/src/docs/manual/HowToUse_zh.md b/src/docs/manual/HowToUse_zh.md
index 9bbc435..f1b233b 100644
--- a/src/docs/manual/HowToUse_zh.md
+++ b/src/docs/manual/HowToUse_zh.md
@@ -5,15 +5,15 @@
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;如果需要在Linkis的基础上,接入其它应用,需要针对Linkis提供的接口进行开发,Linkis提供了多种客户端接入接口,更详细的使用介绍可以参考以下内容:  
 - [**Restful API使用方式**](./../API_Documentations/Linkis任务提交执行RestAPI文档.md)
 - [**JDBC API使用方式**](./../API_Documentations/任务提交执行JDBC_API文档.md)
-- [**Java SDK使用方式**](./../User_Manual/Linkis1.0用户使用文档.md)
+- [**Java SDK使用方式**](#/docs/manual/UserManual)
 ## 2. Scriptis使用Linkis
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;如果需要使用Linkis完成交互式在线分析处理的工作,并且不需要诸如工作流开发、工作流调度、数据服务等数据分析应用工具,可以单独安装[**Scriptis**](https://github.com/WeBankFinTech/Scriptis),详细安装教程可参考其对应的安装部署文档。  
 ## 2.1. 使用Scriptis执行脚本
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;目前Scriptis支持向Linkis提交多种任务类型,包括Spark SQL、Hive SQL、Scala、PythonSpark等,为了满足数据分析的需求,Scriptis左侧,提供查看用户工作空间信息、用户数据库和表信息、用户自定义函数,以及HDFS目录,同时支持上传下载,结果集导出等功能。Scriptis使用Linkis十分简单,可以很方便的在编辑栏书写脚本,提交到Linkis运行。  
-![Scriptis使用Linkis](../../assets/docs/manual/sparksql-run.png)
+![Scriptis使用Linkis](../../assets/docs/manual/sparksql_run.png)
 ## 2.2. Scriptis管理台
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Linkis提供资源配置和管理的接口,如果希望对任务资源进行配置管理,可以在Scriptis的管理台界面进行设置,包括队列设置、资源配置、引擎实例个数等。通过管理台,可以很方便的配置向Linkis提交任务的资源,使得更加方便快捷。  
-![Scriptis使用Linkis](../../assets/docs/manual/queue-set.png)
+![Scriptis使用Linkis](../../assets/docs/manual/queue_set.png)
 
 ## 3. DataSphere Studio使用Linkis
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;[**DataSphere Studio**](https://github.com/WeBankFinTech/DataSphereStudio)简称DSS,是微众银行大数据平台开源的一站式数据分析处理平台,DSS交互式分析模块集成了Scriptis,使用DSS进行交互式分析和Scriptis一样,除了提供Scriptis的基本功能外,DSS提供和集成了更加丰富和强大的数据分析功能,包括用于数据提取的数据服务、开发报表的工作流、可视化分析软件Visualis等。由于原生的支持,目前DSS是与Linkis集成度最高的软件,如果希望使用完整的Linkis功能,建议使用DSS搭配Linkis一起使用。  
diff --git a/src/pages/docs/architecture/AddEngineConn.vue b/src/pages/docs/architecture/AddEngineConn.vue
new file mode 100644
index 0000000..e78fb7d
--- /dev/null
+++ b/src/pages/docs/architecture/AddEngineConn.vue
@@ -0,0 +1,13 @@
+<template>
+  <docEn v-if="lang === 'en'"></docEn>
+  <docZh v-else></docZh>
+</template>
+<script setup>
+  import { ref } from "vue";
+
+  import docEn from '../../../docs/architecture/AddEngineConn_en.md';
+  import docZh from '../../../docs/architecture/AddEngineConn_zh.md';
+
+  // 初始化语言
+  const lang = ref(localStorage.getItem('locale') || 'en');
+</script>
diff --git a/src/pages/docs/architecture/DifferenceBetween1.0&0.x.vue b/src/pages/docs/architecture/DifferenceBetween1.0&0.x.vue
new file mode 100644
index 0000000..8883613
--- /dev/null
+++ b/src/pages/docs/architecture/DifferenceBetween1.0&0.x.vue
@@ -0,0 +1,13 @@
+<template>
+  <docEn v-if="lang === 'en'"></docEn>
+  <docZh v-else></docZh>
+</template>
+<script setup>
+  import { ref } from "vue";
+
+  import docEn from '../../../docs/architecture/DifferenceBetween1.0&0.x_en.md';
+  import docZh from '../../../docs/architecture/DifferenceBetween1.0&0.x_zh.md';
+
+  // 初始化语言
+  const lang = ref(localStorage.getItem('locale') || 'en');
+</script>
diff --git a/src/pages/docs/architecture/JobSubmission.vue b/src/pages/docs/architecture/JobSubmission.vue
new file mode 100644
index 0000000..f12c3a1
--- /dev/null
+++ b/src/pages/docs/architecture/JobSubmission.vue
@@ -0,0 +1,13 @@
+<template>
+  <docEn v-if="lang === 'en'"></docEn>
+  <docZh v-else></docZh>
+</template>
+<script setup>
+  import { ref } from "vue";
+
+  import docEn from '../../../docs/architecture/JobSubmission_en.md';
+  import docZh from '../../../docs/architecture/JobSubmission_zh.md';
+
+  // 初始化语言
+  const lang = ref(localStorage.getItem('locale') || 'en');
+</script>
diff --git a/src/pages/docs/index.vue b/src/pages/docs/index.vue
index fa3d5f0..26b759a 100644
--- a/src/pages/docs/index.vue
+++ b/src/pages/docs/index.vue
@@ -85,6 +85,24 @@
                 }]
 
 
+        },
+        {
+            title: '架构文档',
+            link: '/docs/architecture/DifferenceBetween1.0&0.x',
+            children: [
+                {
+                    title: 'Linkis1.0与Linkis0.X的区别简述',
+                    link: '/docs/architecture/DifferenceBetween1.0&0.x',
+                },
+                {
+                    title: 'Job提交准备执行流程',
+                    link: '/docs/architecture/JobSubmission',
+                }, {
+                    title: 'EngineConn新增流程',
+                    link: '/docs/architecture/AddEngineConn',
+                }]
+
+
         }
     ]
 </script>
diff --git a/src/router.js b/src/router.js
index bf088d4..b6d97a0 100644
--- a/src/router.js
+++ b/src/router.js
@@ -34,7 +34,7 @@ const routes = [{
       component: () => import( /* webpackChunkName: "group-doc_UserManual" */ './pages/docs/manual/UserManual.vue')
     },{
       path: 'manual/HowToUse',
-      name: 'manual/HowToUse',
+      name: 'manualHowToUse',
       component: () => import( /* webpackChunkName: "group-doc_HowToUse" */ './pages/docs/manual/HowToUse.vue')
     },{
       path: 'manual/ConsoleUserManual',
@@ -44,7 +44,22 @@ const routes = [{
         path: 'manual/CliManual',
         name: 'manualCliManual',
         component: () => import( /* webpackChunkName: "group-doc_CliManual" */ './pages/docs/manual/CliManual.vue')
-      }]
+      },
+
+      {
+        path: 'architecture/JobSubmission',
+        name: 'architectureJobSubmission',
+        component: () => import( /* webpackChunkName: "group-doc_JobSubmission" */ './pages/docs/architecture/JobSubmission.vue')
+      },{
+        path: 'architecture/AddEngineConn',
+        name: 'architectureAddEngineConn',
+        component: () => import( /* webpackChunkName: "group-doc_AddEngineConn" */ './pages/docs/architecture/AddEngineConn.vue')
+      },{
+        path: 'architecture/DifferenceBetween1.0&0.x',
+        name: 'architectureDifferenceBetween1.0&0.x',
+        component: () => import( /* webpackChunkName: "group-doc_DifferenceBetween1.0&0.x" */ './pages/docs/architecture/DifferenceBetween1.0&0.x.vue')
+      }
+    ]
   },
   {
     path: '/faq/index',

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@linkis.apache.org
For additional commands, e-mail: commits-help@linkis.apache.org