Posted to commits@seatunnel.apache.org by ki...@apache.org on 2022/04/27 03:58:44 UTC

[incubator-seatunnel-website] branch main updated: Add 2.1.1 docs (#115)

This is an automated email from the ASF dual-hosted git repository.

kirs pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-seatunnel-website.git


The following commit(s) were added to refs/heads/main by this push:
     new 8818b102 Add 2.1.1 docs (#115)
8818b102 is described below

commit 8818b102e088237cd919363895c5b915d758ecf6
Author: Wenjun Ruan <we...@apache.org>
AuthorDate: Wed Apr 27 11:58:38 2022 +0800

    Add 2.1.1 docs (#115)
    
    * Add 2.1.1 docs
    
    * update download page
    
    * update node version to 14 in README
    
    * rename intro dir
    
    * Change introduction to intro/about
---
 README.md                                          |   2 +-
 README_ZH.md                                       |   2 +-
 docusaurus.config.js                               |   3 +-
 src/pages/download/data.json                       |  14 +
 src/pages/versions/config.json                     |  14 +-
 versioned_docs/version-2.1.1/command/usage.mdx     | 164 +++++++++
 .../version-2.1.1/connector/config-example.md      |   8 +
 .../version-2.1.1/connector/sink/Clickhouse.md     | 146 ++++++++
 .../version-2.1.1/connector/sink/ClickhouseFile.md | 162 +++++++++
 .../version-2.1.1/connector/sink/Console.mdx       | 101 ++++++
 .../version-2.1.1/connector/sink/Doris.mdx         | 174 ++++++++++
 .../version-2.1.1/connector/sink/Druid.md          | 106 ++++++
 .../version-2.1.1/connector/sink/Elasticsearch.mdx | 118 +++++++
 .../version-2.1.1/connector/sink/Email.md          | 101 ++++++
 .../version-2.1.1/connector/sink/File.mdx          | 190 +++++++++++
 .../version-2.1.1/connector/sink/Hbase.md          |  66 ++++
 .../version-2.1.1/connector/sink/Hive.md           |  70 ++++
 .../version-2.1.1/connector/sink/Hudi.md           |  41 +++
 .../version-2.1.1/connector/sink/Iceberg.md        |  68 ++++
 .../version-2.1.1/connector/sink/InfluxDb.md       |  88 +++++
 .../version-2.1.1/connector/sink/Jdbc.mdx          | 189 +++++++++++
 .../version-2.1.1/connector/sink/Kafka.md          |  62 ++++
 .../version-2.1.1/connector/sink/Kudu.md           |  40 +++
 .../version-2.1.1/connector/sink/MongoDB.md        |  49 +++
 .../version-2.1.1/connector/sink/Phoenix.md        |  53 +++
 .../version-2.1.1/connector/sink/Redis.md          |  82 +++++
 .../version-2.1.1/connector/sink/Tidb.md           |  86 +++++
 .../version-2.1.1/connector/sink/common-options.md |  45 +++
 .../version-2.1.1/connector/source/Druid.md        |  65 ++++
 .../connector/source/Elasticsearch.md              |  62 ++++
 .../version-2.1.1/connector/source/Fake.mdx        | 135 ++++++++
 .../version-2.1.1/connector/source/FeishuSheet.md  |  59 ++++
 .../version-2.1.1/connector/source/File.mdx        | 124 +++++++
 .../version-2.1.1/connector/source/Hbase.md        |  44 +++
 .../version-2.1.1/connector/source/Hive.md         |  64 ++++
 .../version-2.1.1/connector/source/Http.md         |  61 ++++
 .../version-2.1.1/connector/source/Hudi.md         |  76 +++++
 .../version-2.1.1/connector/source/Iceberg.md      |  59 ++++
 .../version-2.1.1/connector/source/InfluxDb.md     |  87 +++++
 .../version-2.1.1/connector/source/Jdbc.mdx        | 205 ++++++++++++
 .../version-2.1.1/connector/source/Kafka.mdx       | 177 ++++++++++
 .../version-2.1.1/connector/source/Kudu.md         |  43 +++
 .../version-2.1.1/connector/source/MongoDB.md      |  62 ++++
 .../version-2.1.1/connector/source/Phoenix.md      |  58 ++++
 .../version-2.1.1/connector/source/Redis.md        |  87 +++++
 .../version-2.1.1/connector/source/Socket.mdx      | 102 ++++++
 .../version-2.1.1/connector/source/Tidb.md         |  66 ++++
 .../version-2.1.1/connector/source/Webhook.md      |  42 +++
 .../connector/source/common-options.mdx            |  89 +++++
 .../version-2.1.1/connector/source/neo4j.md        | 143 ++++++++
 .../contribution/contribute-plugin.md              | 125 +++++++
 .../version-2.1.1/contribution/new-license.md      |  25 ++
 versioned_docs/version-2.1.1/contribution/setup.md |  86 +++++
 versioned_docs/version-2.1.1/deployment.mdx        | 124 +++++++
 versioned_docs/version-2.1.1/faq.md                | 367 +++++++++++++++++++++
 versioned_docs/version-2.1.1/intro/about.md        |  72 ++++
 versioned_docs/version-2.1.1/intro/history.md      |  15 +
 versioned_docs/version-2.1.1/intro/why.md          |  13 +
 versioned_docs/version-2.1.1/introduction/about.md |  72 ++++
 .../version-2.1.1/introduction/history.md          |  15 +
 versioned_docs/version-2.1.1/introduction/why.md   |  13 +
 versioned_docs/version-2.1.1/start/docker.md       |   8 +
 versioned_docs/version-2.1.1/start/kubernetes.mdx  | 268 +++++++++++++++
 versioned_docs/version-2.1.1/start/local.mdx       | 150 +++++++++
 .../version-2.1.1/transform/common-options.mdx     | 116 +++++++
 versioned_docs/version-2.1.1/transform/json.md     | 195 +++++++++++
 versioned_docs/version-2.1.1/transform/split.mdx   | 122 +++++++
 versioned_docs/version-2.1.1/transform/sql.md      |  60 ++++
 versioned_sidebars/version-2.1.1-sidebars.json     | 106 ++++++
 versions.json                                      |   1 +
 70 files changed, 6100 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index d6b4cbf7..25767c4a 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@ asf-staging #The asf-staging official website test environment is accessed throu
 
 This website is compiled using node, using Docusaurus framework components
 
-1. Download and install nodejs (version>12.5.0)
+1. Download and install nodejs (version>14)
 2. Clone the code to the local `git clone git@github.com:apache/incubator-seatunnel-website.git`
 3. Run `./tools/build-docs.sh` to fetch and prepare docs from **apache/incubator-seatunnel**; for more information, see [how our document work](HOW_DOC_WORK.md)
 4. Run `npm install` to install the required dependent libraries.
diff --git a/README_ZH.md b/README_ZH.md
index ff5b5121..ab93c6c3 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -21,7 +21,7 @@ asf-staging 官网测试环境  通过https://seatunnel.staged.apache.org 访问
 
 本网站是使用node编译的,使用的是Docusaurus框架组件
 
-1. 下载并安装 nodejs(version>12.5.0)
+1. 下载并安装 nodejs(version>14)
 2. 克隆代码到本地 `git clone  git@github.com:apache/incubator-seatunnel-website.git`
 3. 运行 `./tools/build-docs.sh` 从 **apache/incubator-seatunnel** 中拉取、准备文档。如果想要了解更多细节和操作请阅读[文档如何工作](HOW_DOC_WORK.md)
 4. 运行 `npm install` 来安装所需的依赖库。
diff --git a/docusaurus.config.js b/docusaurus.config.js
index db5f150c..b54e1dc2 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -85,7 +85,8 @@ const config = {
                     items: [
                         {
                             label: versions[0],
-                            to: `docs/${versions[0]}/introduction`,
+                            to: `docs/${versions[0]}/intro/about`,
+
                         },
                         ...versions.slice(1).map((version) => ({
                             label: (version === "1.x") ? "1.x(Not Apache Release)" : version,
diff --git a/src/pages/download/data.json b/src/pages/download/data.json
index 341b55ee..0709e0bc 100644
--- a/src/pages/download/data.json
+++ b/src/pages/download/data.json
@@ -1,4 +1,18 @@
 [
+	{
+		"date": "2022-04-25",
+		"version": "v2.1.1",
+		"sourceCode": {
+			"src": "https://www.apache.org/dyn/closer.lua/incubator/seatunnel/2.1.1/apache-seatunnel-incubating-2.1.1-src.tar.gz",
+			"asc": "https://downloads.apache.org/incubator/seatunnel/2.1.1/apache-seatunnel-incubating-2.1.1-src.tar.gz.asc",
+			"sha512": "https://downloads.apache.org/incubator/seatunnel/2.1.1/apache-seatunnel-incubating-2.1.1-src.tar.gz.sha512"
+		},
+		"binaryDistribution": {
+			"bin": "https://www.apache.org/dyn/closer.lua/incubator/seatunnel/2.1.0/apache-seatunnel-incubating-2.1.0-bin.tar.gz",
+			"asc": "https://downloads.apache.org/incubator/seatunnel/2.1.0/apache-seatunnel-incubating-2.1.0-bin.tar.gz.asc",
+			"sha512": "https://downloads.apache.org/incubator/seatunnel/2.1.0/apache-seatunnel-incubating-2.1.0-bin.tar.gz.sha512"
+		}
+	},
 	{
 		"date": "2022-03-18",
 		"version": "v2.1.0",
diff --git a/src/pages/versions/config.json b/src/pages/versions/config.json
index fc7d47f5..b2f08abf 100644
--- a/src/pages/versions/config.json
+++ b/src/pages/versions/config.json
@@ -50,10 +50,10 @@
       "nextLink": "/docs/intro/about",
       "latestData": [
         {
-          "versionLabel": "2.1.0",
-          "docUrl": "/docs/2.1.0/introduction",
-          "downloadUrl": "https://github.com/apache/incubator-seatunnel/releases/tag/2.1.0",
-          "sourceTag": "2.1.0"
+          "versionLabel": "2.1.1",
+          "docUrl": "/docs/2.1.1/intro/about",
+          "downloadUrl": "https://github.com/apache/incubator-seatunnel/releases/tag/2.1.1",
+          "sourceTag": "2.1.1"
         }
       ],
       "nextData": [
@@ -63,6 +63,12 @@
         }
       ],
       "historyData": [
+        {
+          "versionLabel": "2.1.1",
+          "docUrl": "/docs/2.1.1/intro/about",
+          "downloadUrl": "https://github.com/apache/incubator-seatunnel/releases/tag/2.1.1",
+          "sourceTag": "2.1.1"
+        },
         {
           "versionLabel": "2.1.0",
           "docUrl": "/docs/2.1.0/introduction",
diff --git a/versioned_docs/version-2.1.1/command/usage.mdx b/versioned_docs/version-2.1.1/command/usage.mdx
new file mode 100644
index 00000000..364ecd43
--- /dev/null
+++ b/versioned_docs/version-2.1.1/command/usage.mdx
@@ -0,0 +1,164 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Command usage
+
+## Command Entrypoint
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+bin/start-seatunnel-spark.sh
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+bin/start-seatunnel-flink.sh  
+```
+
+</TabItem>
+</Tabs>
+
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+bin/start-seatunnel-spark.sh \
+    -c config-path \
+    -m master \
+    -e deploy-mode \
+    -i city=beijing
+```
+
+- Use `-m` or `--master` to specify the cluster manager
+
+- Use `-e` or `--deploy-mode` to specify the deployment mode
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+bin/start-seatunnel-flink.sh \
+    -c config-path \
+    -i key=value \
+    -r run-application \
+    [other params]
+```
+
+- Use `-r` or `--run-mode` to specify the Flink job run mode; valid values are `run-application` and `run` (the default)
+
+</TabItem>
+</Tabs>
+
+- Use `-c` or `--config` to specify the path of the configuration file
+
+- Use `-i` or `--variable` to specify variables in the configuration file; multiple variables can be configured
+
+## Example
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+# Yarn client mode
+./bin/start-seatunnel-spark.sh \
+    --master yarn \
+    --deploy-mode client \
+    --config ./config/application.conf
+
+# Yarn cluster mode
+./bin/start-seatunnel-spark.sh \
+    --master yarn \
+    --deploy-mode cluster \
+    --config ./config/application.conf
+```
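+
+A minimal sketch of variable substitution with the Spark entrypoint, assuming the configuration file references `"${city}"` (the variable name and value are illustrative):
+
+```bash
+# Pass a variable that replaces "${city}" in the configuration file
+./bin/start-seatunnel-spark.sh \
+    --master yarn \
+    --deploy-mode client \
+    --config ./config/application.conf \
+    -i city=beijing
+```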
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+env {
+    execution.parallelism = 1
+}
+
+source {
+    FakeSourceStream {
+        result_table_name = "fake"
+        field_name = "name,age"
+    }
+}
+
+transform {
+    sql {
+        sql = "select name,age from fake where name='"${my_name}"'"
+    }
+}
+
+sink {
+    ConsoleSink {}
+}
+```
+
+**Run**
+
+```bash
+bin/start-seatunnel-flink.sh \
+    -c config-path \
+    -i my_name=kid-xiong
+```
+
+This command will replace `"${my_name}"` in the configuration file with `kid-xiong`
+
+> For the remaining parameters, refer to the native Flink parameters, which can be listed with `bin/flink run -h` . These parameters can be added as needed. For example, `-m yarn-cluster` runs the job in `on yarn` mode.
+
+```bash
+bin/flink run -h
+```
+
+For example:
+
+* `-p 2` specifies that the job parallelism is `2`
+
+```bash
+bin/start-seatunnel-flink.sh \
+    -p 2 \
+    -c config-path
+```
+
+* Configurable parameters of `flink yarn-cluster`
+
+For example: `-m yarn-cluster -ynm seatunnel` specifies that the job runs on `yarn` and that its name in the `yarn WebUI` is `seatunnel`
+
+```bash
+bin/start-seatunnel-flink.sh \
+    -m yarn-cluster \
+    -ynm seatunnel \
+    -c config-path
+```
+
+</TabItem>
+</Tabs>
diff --git a/versioned_docs/version-2.1.1/connector/config-example.md b/versioned_docs/version-2.1.1/connector/config-example.md
new file mode 100644
index 00000000..e5e21e7f
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/config-example.md
@@ -0,0 +1,8 @@
+# Config Examples
+
+This section shows examples of SeaTunnel configuration files; ready-to-use examples are available in
+[example-config](https://github.com/apache/incubator-seatunnel/tree/dev/config)
+
+## What's More
+
+If you want to know more about this configuration format, please see [HOCON](https://github.com/lightbend/config/blob/main/HOCON.md).
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/sink/Clickhouse.md b/versioned_docs/version-2.1.1/connector/sink/Clickhouse.md
new file mode 100644
index 00000000..4d2b0bc1
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Clickhouse.md
@@ -0,0 +1,146 @@
+# Clickhouse
+
+## Description
+
+Use [Clickhouse-jdbc](https://github.com/ClickHouse/clickhouse-jdbc) to map the source data to ClickHouse columns by field name and write it into ClickHouse. The target table must be created in advance.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Clickhouse
+* [x] Flink: Clickhouse
+
+:::
+
+
+## Options
+
+| name           | type    | required | default value |
+|----------------|---------| -------- |---------------|
+| bulk_size      | number  | no       | 20000         |
+| clickhouse.*   | string  | no       |               |
+| database       | string  | yes      | -             |
+| fields         | array   | no       | -             |
+| host           | string  | yes      | -             |
+| password       | string  | no       | -             |
+| retry          | number  | no       | 1             |
+| retry_codes    | array   | no       | [ ]           |
+| table          | string  | yes      | -             |
+| username       | string  | no       | -             |
+| split_mode     | boolean | no       | false         |
+| sharding_key   | string  | no       | -             |
+| common-options | string  | no       | -             |
+
+### bulk_size [number]
+
+The number of rows written through [Clickhouse-jdbc](https://github.com/ClickHouse/clickhouse-jdbc) per batch; the default is `20000` .
+
+### database [string]
+
+database name
+
+### fields [array]
+
+The data field that needs to be output to `ClickHouse` , if not configured, it will be automatically adapted according to the data `schema` .
+
+### host [string]
+
+`ClickHouse` cluster address, the format is `host:port` , allowing multiple `hosts` to be specified. Such as `"host1:8123,host2:8123"` .
+
+### password [string]
+
+`ClickHouse` user password. This field is only required when authentication is enabled in `ClickHouse` .
+
+### retry [number]
+
+The number of retries, the default is 1
+
+### retry_codes [array]
+
+When an exception occurs, the ClickHouse exception error code of the operation will be retried. For a detailed list of error codes, please refer to [ClickHouseErrorCode](https://github.com/ClickHouse/clickhouse-jdbc/blob/master/clickhouse-jdbc/src/main/java/ru/yandex/clickhouse/except/ClickHouseErrorCode.java)
+
+If all retries fail, this batch of data will be discarded. Use with caution!
+
+### table [string]
+
+table name
+
+### username [string]
+
+`ClickHouse` username. This field is only required when authentication is enabled in `ClickHouse`
+
+### clickhouse [string]
+
+In addition to the above mandatory parameters that must be specified by `clickhouse-jdbc` , users can also specify multiple optional parameters, which cover all the [parameters](https://github.com/ClickHouse/clickhouse-jdbc/blob/master/clickhouse-jdbc/src/main/java/ru/yandex/clickhouse/settings/ClickHouseProperties.java) provided by `clickhouse-jdbc` .
+
+The way to specify the parameter is to add the prefix `clickhouse.` to the original parameter name. For example, the way to specify `socket_timeout` is: `clickhouse.socket_timeout = 50000` . If these non-essential parameters are not specified, they will use the default values given by `clickhouse-jdbc`.
+
+### split_mode [boolean]
+
+This mode only supports a ClickHouse table whose engine is 'Distributed', and the `internal_replication` option
+should be `true`. SeaTunnel will split the distributed table data and write directly to each shard. The shard weights
+defined in ClickHouse are taken into account.
+
+### sharding_key [string]
+
+When `split_mode` is used, the node to send data to must be chosen. By default it is selected randomly, but the
+`sharding_key` parameter can be used to specify the field for the sharding algorithm. This option only
+works when `split_mode` is true.
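+
+A minimal sketch combining these options (assuming the target table uses the `Distributed` engine; the sharding field `age` is illustrative):
+
+```bash
+ClickHouse {
+    host = "localhost:8123"
+    database = "nginx"
+    table = "access_msg"
+    username = "username"
+    password = "password"
+    split_mode = true
+    sharding_key = "age"
+}
+```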
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [common options](common-options.md) for details
+
+## ClickHouse type comparison table
+
+| ClickHouse field type | Convert plugin conversion goal type | SQL conversion expression     | Description                                           |
+| --------------------- | ----------------------------------- | ----------------------------- | ----------------------------------------------------- |
+| Date                  | string                              | string()                      | `yyyy-MM-dd` Format string                            |
+| DateTime              | string                              | string()                      | `yyyy-MM-dd HH:mm:ss` Format string                   |
+| String                | string                              | string()                      |                                                       |
+| Int8                  | integer                             | int()                         |                                                       |
+| Uint8                 | integer                             | int()                         |                                                       |
+| Int16                 | integer                             | int()                         |                                                       |
+| Uint16                | integer                             | int()                         |                                                       |
+| Int32                 | integer                             | int()                         |                                                       |
+| Uint32                | long                                | bigint()                      |                                                       |
+| Int64                 | long                                | bigint()                      |                                                       |
+| Uint64                | long                                | bigint()                      |                                                       |
+| Float32               | float                               | float()                       |                                                       |
+| Float64               | double                              | double()                      |                                                       |
+| Decimal(P, S)         | -                                   | CAST(source AS DECIMAL(P, S)) | Decimal32(S), Decimal64(S), Decimal128(S) Can be used |
+| Array(T)              | -                                   | -                             |                                                       |
+| Nullable(T)           | Depends on T                        | Depends on T                  |                                                       |
+| LowCardinality(T)     | Depends on T                        | Depends on T                  |                                                       |
+
+## Examples
+
+```bash
+clickhouse {
+    host = "localhost:8123"
+    clickhouse.socket_timeout = 50000
+    database = "nginx"
+    table = "access_msg"
+    fields = ["date", "datetime", "hostname", "http_code", "data_size", "ua", "request_time"]
+    username = "username"
+    password = "password"
+    bulk_size = 20000
+}
+```
+
+```bash
+ClickHouse {
+    host = "localhost:8123"
+    database = "nginx"
+    table = "access_msg"
+    fields = ["date", "datetime", "hostname", "http_code", "data_size", "ua", "request_time"]
+    username = "username"
+    password = "password"
+    bulk_size = 20000
+    retry_codes = [209, 210]
+    retry = 3
+}
+```
+
+> In case of network timeout or network abnormality, retry writing 3 times
diff --git a/versioned_docs/version-2.1.1/connector/sink/ClickhouseFile.md b/versioned_docs/version-2.1.1/connector/sink/ClickhouseFile.md
new file mode 100644
index 00000000..a4d40614
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/ClickhouseFile.md
@@ -0,0 +1,162 @@
+# ClickhouseFile
+
+## Description
+
+Generate the ClickHouse data file with the clickhouse-local program and then send it to the ClickHouse
+server; this is also called bulk load.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: ClickhouseFile
+* [x] Flink
+
+:::
+
+## Options
+
+| name                   | type     | required | default value |
+|------------------------|----------|----------|---------------|
+| database               | string   | yes      | -             |
+| fields                 | array    | no       | -             |
+| host                   | string   | yes      | -             |
+| password               | string   | no       | -             |
+| table                  | string   | yes      | -             |
+| username               | string   | no       | -             |
+| sharding_key           | string   | no       | -             |
+| clickhouse_local_path  | string   | yes      | -             |
+| tmp_batch_cache_line   | int      | no       | 100000        |
+| copy_method            | string   | no       | scp           |
+| node_free_password     | boolean  | no       | false         |
+| node_pass              | list     | no       | -             |
+| node_pass.node_address | string   | no       | -             |
+| node_pass.password     | string   | no       | -             |
+| common-options         | string   | no       | -             |
+
+### database [string]
+
+database name
+
+### fields [array]
+
+The data field that needs to be output to `ClickHouse` , if not configured, it will be automatically adapted according to the data `schema` .
+
+### host [string]
+
+`ClickHouse` cluster address, the format is `host:port` , allowing multiple `hosts` to be specified. Such as `"host1:8123,host2:8123"` .
+
+### password [string]
+
+`ClickHouse` user password. This field is only required when authentication is enabled in `ClickHouse` .
+
+### table [string]
+
+table name
+
+### username [string]
+
+`ClickHouse` username. This field is only required when authentication is enabled in `ClickHouse`
+
+### sharding_key [string]
+
+When `split_mode` is used, the node to send data to must be chosen. By default it is selected randomly, but the
+`sharding_key` parameter can be used to specify the field for the sharding algorithm. This option only
+works when `split_mode` is true.
+
+### clickhouse_local_path [string]
+
+The path of the clickhouse-local program on the Spark nodes. Since it is invoked by every task,
+clickhouse-local should be located at the same path on each Spark node.
+
+### tmp_batch_cache_line [int]
+
+SeaTunnel uses memory mapping to cache, in a temporary file, the data that needs to be written to
+ClickHouse. This parameter configures the number of rows written to the file at a time.
+Most of the time you do not need to modify it.
+
+### copy_method [string]
+
+Specifies the method used to transfer files; the default is `scp`, and the options are `scp` and `rsync`
+
+### node_free_password [boolean]
+
+Because SeaTunnel uses scp or rsync for file transfer, it needs access to the ClickHouse servers.
+If password-free login is configured between each Spark node and the ClickHouse servers,
+you can set this option to true; otherwise you need to configure the corresponding node passwords in the node_pass option
+
+### node_pass [list]
+
+Used to save the addresses and corresponding passwords of all clickhouse servers
+
+### node_pass.node_address [string]
+
+The address corresponding to the clickhouse server
+
+### node_pass.node_password [string]
+
+The password corresponding to the clickhouse server; currently only the root user is supported.
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [common options](common-options.md) for details
+
+## ClickHouse type comparison table
+
+| ClickHouse field type | Convert plugin conversion goal type | SQL conversion expression     | Description                                           |
+| --------------------- | ----------------------------------- | ----------------------------- |-------------------------------------------------------|
+| Date                  | string                              | string()                      | `yyyy-MM-dd` Format string                            |
+| DateTime              | string                              | string()                      | `yyyy-MM-dd HH:mm:ss` Format string                   |
+| String                | string                              | string()                      |                                                       |
+| Int8                  | integer                             | int()                         |                                                       |
+| Uint8                 | integer                             | int()                         |                                                       |
+| Int16                 | integer                             | int()                         |                                                       |
+| Uint16                | integer                             | int()                         |                                                       |
+| Int32                 | integer                             | int()                         |                                                       |
+| Uint32                | long                                | bigint()                      |                                                       |
+| Int64                 | long                                | bigint()                      |                                                       |
+| Uint64                | long                                | bigint()                      |                                                       |
+| Float32               | float                               | float()                       |                                                       |
+| Float64               | double                              | double()                      |                                                       |
+| Decimal(P, S)         | -                                   | CAST(source AS DECIMAL(P, S)) | Decimal32(S), Decimal64(S), Decimal128(S) Can be used |
+| Array(T)              | -                                   | -                             |                                                       |
+| Nullable(T)           | Depends on T                        | Depends on T                  |                                                       |
+| LowCardinality(T)     | Depends on T                        | Depends on T                  |                                                       |
+
+## Examples
+
+```bash
+ClickhouseFile {
+    host = "localhost:8123"
+    database = "nginx"
+    table = "access_msg"
+    fields = ["date", "datetime", "hostname", "http_code", "data_size", "ua", "request_time"]
+    username = "username"
+    password = "password"
+    clickhouse_local_path = "/usr/bin/clickhouse-local"
+    node_free_password = true
+}
+```
+
+```bash
+ClickhouseFile {
+    host = "localhost:8123"
+    database = "nginx"
+    table = "access_msg"
+    fields = ["date", "datetime", "hostname", "http_code", "data_size", "ua", "request_time"]
+    username = "username"
+    password = "password"
+    sharding_key = "age"
+    clickhouse_local_path = "/usr/bin/Clickhouse local"
+    node_pass = [
+      {
+        node_address = "localhost1"
+        password = "password"
+      }
+      {
+        node_address = "localhost2"
+        password = "password"
+      }
+    ]
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/Console.mdx b/versioned_docs/version-2.1.1/connector/sink/Console.mdx
new file mode 100644
index 00000000..8eaa8844
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Console.mdx
@@ -0,0 +1,101 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Console
+
+## Description
+
+Output data to the standard terminal or to the Flink TaskManager, which is often used for debugging and makes it easy to observe the data.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Console
+* [x] Flink: Console
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| limit          | number | no       | 100           |
+| serializer     | string | no       | plain         |
+| common-options | string | no       | -             |
+
+### limit [number]
+
+Limit the number of `rows` to be output. The legal range is `[-1, 2147483647]` ; `-1` means that up to `2147483647` rows are output
+
+### serializer [string]
+
+The format of serialization when outputting. Available serializers include: `json` , `plain`
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+</TabItem>
+<TabItem value="flink">
+
+## Options
+
+| name           | type   | required | default value |
+|----------------|--------| -------- |---------------|
+| limit          | int    | no       | INT_MAX       |
+| common-options | string | no       | -             |
+
+### limit [int]
+
+Limit the number of result lines printed to the console
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+</TabItem>
+</Tabs>
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+console {
+    limit = 10,
+    serializer = "json"
+}
+```
+
+> Output 10 rows of data in JSON format
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+ConsoleSink{}
+```
+
+## Note
+
+Flink's console output can be found in the Flink WebUI
+
+</TabItem>
+</Tabs>
diff --git a/versioned_docs/version-2.1.1/connector/sink/Doris.mdx b/versioned_docs/version-2.1.1/connector/sink/Doris.mdx
new file mode 100644
index 00000000..9e1ca8de
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Doris.mdx
@@ -0,0 +1,174 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Doris
+
+### Description:
+
+Write Data to a Doris Table.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Doris
+* [x] Flink: DorisSink
+
+:::
+
+### Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| fenodes | string | yes | - |
+| database | string | yes | - |
+| table | string | yes | - |
+| user | string | yes | - |
+| password | string | yes | - |
+| batch_size | int | yes | 100 |
+| doris.* | string | no | - |
+
+##### fenodes [string]
+
+Doris FE address with HTTP port, for example `127.0.0.1:8030`
+
+##### database [string]
+
+Doris target database name
+
+##### table [string]
+
+Doris target table name
+
+##### user [string]
+
+Doris user name
+
+##### password [string]
+
+Doris user's password
+
+##### batch_size [int]
+
+The number of rows submitted to Doris per batch
+
+Default value: 5000
+
+##### doris.* [string]
+
+Doris stream_load properties; you can use the 'doris.' prefix + a stream_load property name
+[More Doris stream_load Configurations](https://doris.apache.org/administrator-guide/load-data/stream-load-manual.html)
+
+</TabItem>
+<TabItem value="flink">
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| fenodes | string | yes | - |
+| database | string | yes | - |
+| table | string | yes | - |
+| user | string | yes | - |
+| password | string | yes | - |
+| batch_size | int | no | 100 |
+| interval | int | no | 1000 |
+| max_retries | int | no | 1 |
+| doris.* | - | no | - |
+| parallelism | int | no | - |
+
+##### fenodes [string]
+
+Doris FE http address
+
+##### database [string]
+
+Doris database name
+
+##### table [string]
+
+Doris table name
+
+##### user [string]
+
+Doris username
+
+##### password [string]
+
+Doris password
+
+##### batch_size [int]
+
+Maximum number of rows in a single write to Doris; the default value is 5000.
+
+##### interval [int]
+
+The flush interval in milliseconds, after which the asynchronous thread writes the cached data to Doris. Set to 0 to turn off periodic writing.
+
+Default value: 5000
+
+##### max_retries [int]
+
+Number of retries after a failed write to Doris
+
+##### doris.* [string]
+
+The Doris stream load parameters; you can use the 'doris.' prefix + a stream_load property name, e.g. `doris.column_separator = ","`
+[More Doris stream_load Configurations](https://doris.apache.org/administrator-guide/load-data/stream-load-manual.html)
+
+##### parallelism [int]
+
+The parallelism of an individual operator, for DorisSink
+
+</TabItem>
+</Tabs>
+
+### Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```conf
+Doris {
+    fenodes="0.0.0.0:8030"
+    database="test"
+    table="user"
+    user="doris"
+    password="doris"
+    batch_size=10000
+    doris.column_separator="\t"
+    doris.columns="id,user_name,user_name_cn,create_time,last_login_time"
+}
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```conf
+DorisSink {
+    fenodes = "127.0.0.1:8030"
+    database = database
+    table = table
+    user = root
+    password = password
+    batch_size = 1
+    doris.column_separator="\t"
+    doris.columns="id,user_name,user_name_cn,create_time,last_login_time"
+}
+```
+
+</TabItem>
+</Tabs>
diff --git a/versioned_docs/version-2.1.1/connector/sink/Druid.md b/versioned_docs/version-2.1.1/connector/sink/Druid.md
new file mode 100644
index 00000000..534c092c
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Druid.md
@@ -0,0 +1,106 @@
+# Druid
+
+> # Sink plugin: Druid [Flink]
+
+## Description
+
+Write data to Apache Druid.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [ ] Spark
+* [x] Flink: Druid
+
+:::
+
+## Options
+
+| name                    | type     | required | default value |
+| ----------------------- | -------- | -------- | ------------- |
+| coordinator_url         | `String` | yes      | -             |
+| datasource              | `String` | yes      | -             |
+| timestamp_column        | `String` | no       | timestamp     |
+| timestamp_format        | `String` | no       | auto          |
+| timestamp_missing_value | `String` | no       | -             |
+| parallelism             | `Int`    | no       | -             |
+
+### coordinator_url [`String`]
+
+The URL of Coordinator service in Apache Druid.
+
+### datasource [`String`]
+
+The DataSource name in Apache Druid.
+
+### timestamp_column [`String`]
+
+The timestamp column name in Apache Druid, the default value is `timestamp`.
+
+### timestamp_format [`String`]
+
+The timestamp format in Apache Druid, the default value is `auto`, it could be:
+
+- `iso`
+  - ISO8601 with 'T' separator, like "2000-01-01T01:02:03.456"
+
+- `posix`
+  - seconds since epoch
+
+- `millis`
+  - milliseconds since epoch
+
+- `micro`
+  - microseconds since epoch
+
+- `nano`
+  - nanoseconds since epoch
+
+- `auto`
+  - automatically detects ISO (either 'T' or space separator) or millis format
+
+- any [Joda DateTimeFormat](http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) string
+
+### timestamp_missing_value [`String`]
+
+The timestamp missing value in Apache Druid, which is used for input records that have a null or missing timestamp. The value of `timestamp_missing_value` should be in ISO 8601 format, for example `"2022-02-02T02:02:02.222"`.
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, for DruidSink
+
+## Example
+
+### Simple
+
+```hocon
+DruidSink {
+  coordinator_url = "http://localhost:8081/"
+  datasource = "wikipedia"
+}
+```
+
+### Specified timestamp column and format
+
+```hocon
+DruidSink {
+  coordinator_url = "http://localhost:8081/"
+  datasource = "wikipedia"
+  timestamp_column = "timestamp"
+  timestamp_format = "auto"
+}
+```
+
+### Specified timestamp column, format and missing value
+
+```hocon
+DruidSink {
+  coordinator_url = "http://localhost:8081/"
+  datasource = "wikipedia"
+  timestamp_column = "timestamp"
+  timestamp_format = "auto"
+  timestamp_missing_value = "2022-02-02T02:02:02.222"
+}
+```
+
diff --git a/versioned_docs/version-2.1.1/connector/sink/Elasticsearch.mdx b/versioned_docs/version-2.1.1/connector/sink/Elasticsearch.mdx
new file mode 100644
index 00000000..7e42df93
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Elasticsearch.mdx
@@ -0,0 +1,118 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Elasticsearch
+
+## Description
+
+Output data to `Elasticsearch`.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Elasticsearch (supported `ElasticSearch version is >= 2.x and < 7.0.0`)
+* [x] Flink: Elasticsearch (supported `ElasticSearch version = 7.x`; if you want to use Elasticsearch 6.x,
+please repackage from source by executing `mvn clean package -Delasticsearch=6`)
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| hosts             | array  | yes      | -             |
+| index_type        | string | no       | -             |
+| index_time_format | string | no       | yyyy.MM.dd    |
+| index             | string | no       | seatunnel     |
+| es.*              | string | no       |               |
+| common-options    | string | no       | -             |
+
+</TabItem>
+<TabItem value="flink">
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| hosts             | array  | yes      | -             |
+| index_type        | string | no       | log           |
+| index_time_format | string | no       | yyyy.MM.dd    |
+| index             | string | no       | seatunnel     |
+| common-options    | string | no       | -             |
+| parallelism       | int    | no       | -             |
+
+</TabItem>
+</Tabs>
+
+### hosts [array]
+
+`Elasticsearch` cluster address, the format is `host:port` , allowing multiple hosts to be specified. Such as `["host1:9200", "host2:9200"]` .
+
+### index_type [string]
+
+`Elasticsearch` index type; it is recommended not to specify this for Elasticsearch 7 and above
+
+### index_time_format [string]
+
+When the format in the `index` parameter is `xxxx-${now}` , `index_time_format` can specify the time format of the `index` name, and the default value is `yyyy.MM.dd` . The commonly used time formats are listed as follows:
+
+| Symbol | Description        |
+| ------ | ------------------ |
+| y      | Year               |
+| M      | Month              |
+| d      | Day of month       |
+| H      | Hour in day (0-23) |
+| m      | Minute in hour     |
+| s      | Second in minute   |
+
+See [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html) for detailed time format syntax.
+
+### index [string]
+
+Elasticsearch `index` name. If you need to generate an `index` based on time, you can specify a time variable, such as `seatunnel-${now}` . `now` represents the current data processing time.
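+
+A minimal sketch of a time-based index, assuming daily indexes are wanted (the format value is illustrative):
+
+```bash
+elasticsearch {
+    hosts = ["localhost:9200"]
+    index = "seatunnel-${now}"
+    index_time_format = "yyyy.MM.dd"
+}
+```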
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+### es.* [string]
+
+Users can also specify multiple optional parameters. For a detailed list of parameters, see [Parameters Supported by Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html#cfg-mapping).
+
+For example, the way to specify `es.batch.size.entries` is: `es.batch.size.entries = 100000` . If these non-essential parameters are not specified, they will use the default values given in the official documentation.
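+
+A sketch of where such a property sits in the sink block (the value is the one quoted above and is illustrative):
+
+```bash
+elasticsearch {
+    hosts = ["localhost:9200"]
+    index = "seatunnel"
+    es.batch.size.entries = 100000
+}
+```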
+
+</TabItem>
+<TabItem value="flink">
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, data source, or data sink
+
+</TabItem>
+</Tabs>
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+## Examples
+
+```bash
+elasticsearch {
+    hosts = ["localhost:9200"]
+    index = "seatunnel"
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/Email.md b/versioned_docs/version-2.1.1/connector/sink/Email.md
new file mode 100644
index 00000000..8f50db9c
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Email.md
@@ -0,0 +1,101 @@
+# Email
+
+## Description
+
+Supports data output as `email attachments`. The attachments are in `xlsx` format, which can be opened with `excel`, and can be used to send task statistics results by email.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Email
+* [ ] Flink
+
+:::
+
+## Options
+
+| name     | type    | required | default value |
+|----------|---------|----------|---------------|
+| subject  | string  | yes      | -             |
+| from     | string  | yes      | -             |
+| to       | string  | yes      | -             |
+| bodyText | string  | no       | -             |
+| bodyHtml | string  | no       | -             |
+| cc       | string  | no       | -             |
+| bcc      | string  | no       | -             |
+| host     | string  | yes      | -             |
+| port     | string  | yes      | -             |
+| password | string  | yes      | -             |
+| limit    | string  | no       | 100000        |
+| use_ssl  | boolean | no       | false         |
+| use_tls  | boolean | no       | false         |
+
+### subject [string]
+
+Email Subject
+
+### from [string]
+
+Email sender
+
+### to [string]
+
+Email recipients, multiple recipients separated by `,`
+
+### bodyText [string]
+
+Email content, text format
+
+### bodyHtml [string]
+
+Email content, hypertext content
+
+### cc [string]
+
+Email CC, multiple CCs separated by `,`
+
+### bcc [string]
+
+Email Bcc, multiple Bccs separated by `,`
+
+### host [string]
+
+Email server address, for example: `smtp.exmail.qq.com`
+
+### port [string]
+
+Email server port, for example: `25`
+
+### password [string]
+
+The password of the email sender, the user name is the sender specified by `from`
+
+### limit [string]
+
+The number of rows to include, the default is `100000`
+
+### use_ssl [boolean]
+
+Whether to use SSL to encrypt the connection to the SMTP server; the default is `false`
+
+### use_tls [boolean]
+
+Whether to use TLS to encrypt the connection to the SMTP server; the default is `false`
+
+## Examples
+
+```bash
+Email {
+    subject = "Report statistics",
+    from = "xxxx@qq.com",
+    to = "xxxxx1@qq.com,xxxxx2@qq.com",
+    cc = "xxxxx3@qq.com,xxxxx4@qq.com",
+    bcc = "xxxxx5@qq.com,xxxxx6@qq.com",
+    host= "smtp.exmail.qq.com",
+    port= "25",
+    password = "***********",
+    limit = "1000",
+    use_ssl = true
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/File.mdx b/versioned_docs/version-2.1.1/connector/sink/File.mdx
new file mode 100644
index 00000000..f82ad1f0
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/File.mdx
@@ -0,0 +1,190 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# File
+
+## Description
+
+Output data to a local or HDFS file.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: File
+* [x] Flink: File
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name             | type   | required | default value  |
+| ---------------- | ------ | -------- | -------------- |
+| options          | object | no       | -              |
+| partition_by     | array  | no       | -              |
+| path             | string | yes      | -              |
+| path_time_format | string | no       | yyyyMMddHHmmss |
+| save_mode        | string | no       | error          |
+| serializer       | string | no       | json           |
+| common-options   | string | no       | -              |
+
+### options [object]
+
+Custom parameters
+
+### partition_by [array]
+
+Partition data based on selected fields
+
+### path [string]
+
+The file path is required. An `hdfs file` path starts with `hdfs://` , and a `local file` path starts with `file://`.
+The variable `${now}` or `${uuid}` can be added to the path, like `hdfs:///test_${uuid}_${now}.txt`;
+`${now}` represents the current time, and its format can be defined by specifying the option `path_time_format`
+
+### path_time_format [string]
+
+When the format in the `path` parameter is `xxxx-${now}` , `path_time_format` can specify the time format of the path, and the default value is `yyyy.MM.dd` . The commonly used time formats are listed as follows:
+
+| Symbol | Description        |
+| ------ | ------------------ |
+| y      | Year               |
+| M      | Month              |
+| d      | Day of month       |
+| H      | Hour in day (0-23) |
+| m      | Minute in hour     |
+| s      | Second in minute   |
+
+See [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html) for detailed time format syntax.
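+
+A small sketch combining `path` and `path_time_format` (the path and format values are illustrative):
+
+```bash
+file {
+    path = "hdfs:///var/logs-${now}"
+    path_time_format = "yyyy.MM.dd"
+    serializer = "json"
+}
+```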
+
+### save_mode [string]
+
+Storage mode, currently supports `overwrite` , `append` , `ignore` and `error` . For the specific meaning of each mode, see [save-modes](https://spark.apache.org/docs/latest/sql-programming-guide.html#save-modes)
+
+### serializer [string]
+
+Serialization method, currently supports `csv` , `json` , `parquet` , `orc` and `text`
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+</TabItem>
+<TabItem value="flink">
+
+
+| name              | type   | required | default value  |
+|-------------------|--------| -------- |----------------|
+| format            | string | yes      | -              |
+| path              | string | yes      | -              |
+| path_time_format  | string | no       | yyyyMMddHHmmss |
+| write_mode        | string | no       | -              |
+| common-options    | string | no       | -              |
+| parallelism       | int    | no       | -              |
+| rollover_interval | long   | no       | 1              |
+| max_part_size     | long   | no       | 1024          |
+| prefix            | string | no       | seatunnel      |
+| suffix            | string | no       | .ext           |
+
+### format [string]
+
+Currently, `csv` , `json` , and `text` are supported. The streaming mode currently only supports `text`
+
+### path [string]
+
+The file path is required. An `hdfs file` path starts with `hdfs://` , and a `local file` path starts with `file://`.
+The variable `${now}` or `${uuid}` can be added to the path, like `hdfs:///test_${uuid}_${now}.txt`;
+`${now}` represents the current time, and its format can be defined by specifying the option `path_time_format`
+
+### path_time_format [string]
+
+When the format in the `path` parameter is `xxxx-${now}` , `path_time_format` can specify the time format of the path, and the default value is `yyyy.MM.dd` . The commonly used time formats are listed as follows:
+
+| Symbol | Description        |
+| ------ | ------------------ |
+| y      | Year               |
+| M      | Month              |
+| d      | Day of month       |
+| H      | Hour in day (0-23) |
+| m      | Minute in hour     |
+| s      | Second in minute   |
+
+See [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html) for detailed time format syntax.
+
+### write_mode [string]
+
+- NO_OVERWRITE
+  - Do not overwrite; an error is raised if the path already exists
+
+- OVERWRITE
+  - Overwrite; if the path exists, it is deleted and then written
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, for FileSink
+
+### rollover_interval [long]
+
+The rollover interval for creating a new file part, in minutes.
+
+### max_part_size [long]
+
+The maximum size of each file part, in MB.
+
+### prefix [string]
+
+The prefix of each file part.
+
+### suffix [string]
+
+The suffix of each file part.
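+
+A sketch of a rolling `FileSink` using these part-file options (the values are illustrative):
+
+```bash
+FileSink {
+    format = "text"
+    path = "hdfs://localhost:9000/flink/output/"
+    rollover_interval = 5
+    max_part_size = 1024
+    prefix = "seatunnel"
+    suffix = ".txt"
+}
+```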
+
+</TabItem>
+</Tabs>
+
+## Example
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+file {
+    path = "file:///var/logs"
+    serializer = "text"
+}
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+FileSink {
+    format = "json"
+    path = "hdfs://localhost:9000/flink/output/"
+    write_mode = "OVERWRITE"
+}
+```
+
+</TabItem>
+</Tabs>
diff --git a/versioned_docs/version-2.1.1/connector/sink/Hbase.md b/versioned_docs/version-2.1.1/connector/sink/Hbase.md
new file mode 100644
index 00000000..0c6e8336
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Hbase.md
@@ -0,0 +1,66 @@
+# Hbase
+
+## Description
+
+Use [hbase-connectors](https://github.com/apache/hbase-connectors/tree/master/spark) to output data to `Hbase` . Version compatibility for `Hbase (>=2.1.0)` and `Spark (>=2.0.0)` depends on `hbase-connectors` . `hbase-connectors` is also listed as one of the [Apache Hbase Repos](https://hbase.apache.org/book.html#repos) in the official Apache Hbase documentation.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Hbase
+* [ ] Flink
+
+:::
+
+## Options
+
+| name                   | type   | required | default value |
+| ---------------------- | ------ | -------- | ------------- |
+| hbase.zookeeper.quorum | string | yes      |               |
+| catalog                | string | yes      |               |
+| staging_dir            | string | yes      |               |
+| save_mode              | string | no       | append        |
+| hbase.*                | string | no       |               |
+
+### hbase.zookeeper.quorum [string]
+
+The address of the `zookeeper` cluster, the format is: `host01:2181,host02:2181,host03:2181`
+
+### catalog [string]
+
+The structure of the `hbase` table is defined by `catalog` : the name of the `hbase` table and its `namespace` , which `columns` are used as the `rowkey` , and the mapping between `column family` and `columns` can all be defined in the `catalog` (the `hbase table catalog`)
+
+### staging_dir [string]
+
+A path on `HDFS` where the data to be loaded into `hbase` is generated. After the data is loaded, the data files are deleted but the directory remains.
+
+### save_mode [string]
+
+Two write modes are supported, `overwrite` and `append` . `overwrite` means that if there is data in the `hbase table` , `truncate` will be performed and then the data will be loaded.
+
+`append` means that the original data of the `hbase table` will not be cleared, and the load operation will be performed directly.
+
+### hbase.* [string]
+
+Users can also specify multiple optional parameters. For a detailed list of parameters, see [Hbase Supported Parameters](https://hbase.apache.org/book.html#config.files).
+
+If these non-essential parameters are not specified, they will use the default values given in the official documentation.
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+## Examples
+
+```bash
+ hbase {
+    source_table_name = "hive_dataset"
+    hbase.zookeeper.quorum = "host01:2181,host02:2181,host03:2181"
+    catalog = "{\"table\":{\"namespace\":\"default\", \"name\":\"customer\"},\"rowkey\":\"c_custkey\",\"columns\":{\"c_custkey\":{\"cf\":\"rowkey\", \"col\":\"c_custkey\", \"type\":\"bigint\"},\"c_name\":{\"cf\":\"info\", \"col\":\"c_name\", \"type\":\"string\"},\"c_address\":{\"cf\":\"info\", \"col\":\"c_address\", \"type\":\"string\"},\"c_city\":{\"cf\":\"info\", \"col\":\"c_city\", \"type\":\"string\"},\"c_nation\":{\"cf\":\"info\", \"col\":\"c_nation\", \"type\":\"string\"},\"c_regio [...]
+    staging_dir = "/tmp/hbase-staging/"
+    save_mode = "overwrite"
+}
+```
+
+This plugin of `Hbase` does not provide users with the function of creating tables, because the pre-partitioning method of the `hbase` table will be related to business logic, so when running the plugin, the user needs to create the `hbase` table and its pre-partition in advance; for `rowkey` Design, catalog itself supports multi-column combined `rowkey="col1:col2:col3"` , but if there are other design requirements for `rowkey` , such as `add salt` , etc., it can be completely decoupled  [...]
diff --git a/versioned_docs/version-2.1.1/connector/sink/Hive.md b/versioned_docs/version-2.1.1/connector/sink/Hive.md
new file mode 100644
index 00000000..7c57a1af
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Hive.md
@@ -0,0 +1,70 @@
+# Hive
+
+### Description
+
+Write Rows to [Apache Hive](https://hive.apache.org).
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Hive
+* [ ] Flink
+
+:::
+
+### Options
+
+| name                                    | type          | required | default value |
+| --------------------------------------- | ------------- | -------- | ------------- |
+| [sql](#sql-string)                             | string        | no       | -             |
+| [source_table_name](#source_table_name-string) | string        | no       | -             |
+| [result_table_name](#result_table_name-string) | string        | no       | -             |
+| [sink_columns](#sink_columns-string)           | string        | no       | -             |
+| [save_mode](#save_mode-string)                 | string        | no       | -             |
+| [partition_by](#partition_by-arraystring)           | Array[string] | no       | -             |
+
+##### sql [string]
+
+Hive SQL: the whole insert statement, such as `insert into/overwrite $table select * from xxx_table` . If this option exists, other options will be ignored.
+
+##### source_table_name [string]
+
+Datasource of this plugin.
+
+##### result_table_name [string]
+
+The output hive table name, used if the `sql` option is not specified.
+
+##### save_mode [string]
+
+Same as the option `spark.mode` in Spark; combined with `result_table_name` if the `sql` option is not specified.
+
+##### sink_columns [string]
+
+Specify the selected fields which write to result_table_name, separated by commas, combined with `result_table_name` if the `sql` option doesn't specified.
+
+##### partition_by [Array[string]]
+
+Hive partition fields, combined with `result_table_name` if the `sql` option doesn't specified.
+
+### Example
+
+```conf
+sink {
+  Hive {
+    sql = "insert overwrite table seatunnel.test1 partition(province) select name,age,province from myTable2"
+  }
+}
+```
+
+```conf
+sink {
+  Hive {
+    source_table_name = "myTable2"
+    result_table_name = "seatunnel.test1"
+    save_mode = "overwrite"
+    sink_columns = "name,age,province"
+    partition_by = ["province"]
+  }
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/Hudi.md b/versioned_docs/version-2.1.1/connector/sink/Hudi.md
new file mode 100644
index 00000000..5599bb7e
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Hudi.md
@@ -0,0 +1,41 @@
+# Hudi
+
+## Description
+
+Write Rows to a Hudi.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Hudi
+* [ ] Flink
+
+:::
+
+## Options
+
+| name | type | required | default value | engine |
+| --- | --- | --- | --- | --- |
+| hoodie.base.path | string | yes | - | Spark |
+| hoodie.table.name | string | yes | - | Spark |
+| save_mode	 | string | no | append | Spark |
+
+[More hudi Configurations](https://hudi.apache.org/docs/configurations/#Write-Options)
+
+### hoodie.base.path [string]
+
+Base path on lake storage, under which all the table data is stored. Always prefix it explicitly with the storage scheme (e.g hdfs://, s3:// etc). Hudi stores all the main meta-data about commits, savepoints, cleaning audit logs etc in .hoodie directory under this base path directory.
+
+### hoodie.table.name [string]
+
+Table name that will be used for registering with Hive. Needs to be same across runs.
+
+## Examples
+
+```bash
+hudi {
+    hoodie.base.path = "hdfs://"
+    hoodie.table.name = "seatunnel_hudi"
+}
+```
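+
+A variant sketch that also sets the optional `save_mode` ; the base path below is a placeholder and should point at your own lake storage location:
+
+```bash
+hudi {
+    # placeholder base path
+    hoodie.base.path = "hdfs://namenode:8020/hudi/seatunnel_hudi"
+    hoodie.table.name = "seatunnel_hudi"
+    save_mode = "append"
+}
+```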
diff --git a/versioned_docs/version-2.1.1/connector/sink/Iceberg.md b/versioned_docs/version-2.1.1/connector/sink/Iceberg.md
new file mode 100644
index 00000000..ce281fc7
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Iceberg.md
@@ -0,0 +1,68 @@
+# Iceberg
+
+## Description
+
+Write data to Iceberg.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Iceberg
+* [ ] Flink
+
+:::
+
+## Options
+
+| name                                                         | type   | required | default value |
+| ------------------------------------------------------------ | ------ | -------- | ------------- |
+| [path](#path)                                                | string | yes      | -             |
+| [saveMode](#saveMode)                                        | string | no       | append        |
+| [target-file-size-bytes](#target-file-size-bytes)            | long   | no       | -             |
+| [check-nullability](#check-nullability)                      | bool   | no       | -             |
+| [snapshot-property.custom-key](#snapshot-property.custom-key)| string | no       | -             |
+| [fanout-enabled](#fanout-enabled)                            | bool   | no       | -             |
+| [check-ordering](#check-ordering)                            | bool   | no       | -             |
+
+
+Refer to [iceberg write options](https://iceberg.apache.org/docs/latest/spark-configuration/) for more configurations.
+
+### path
+
+Iceberg table location.
+
+### saveMode
+
+append or overwrite. Only these two modes are supported by iceberg. The default value is append.
+
+### target-file-size-bytes
+
+Overrides this table’s write.target-file-size-bytes
+
+### check-nullability
+
+Sets the nullable check on fields
+
+### snapshot-property.custom-key
+
+Adds an entry with custom-key and corresponding value in the snapshot summary
+eg: snapshot-property.aaaa="bbbb"
+
+### fanout-enabled
+
+Overrides this table’s write.spark.fanout.enabled
+
+### check-ordering
+
+Checks if input schema and table schema are same
+
+## Example
+
+```bash
+iceberg {
+    path = "hdfs://localhost:9000/iceberg/warehouse/db/table"
+  }
+```
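+
+A sketch combining the optional write options described above; all values are illustrative:
+
+```bash
+iceberg {
+    path = "hdfs://localhost:9000/iceberg/warehouse/db/table"
+    saveMode = "overwrite"
+    # target data file size of 512 MB (illustrative)
+    target-file-size-bytes = 536870912
+    # adds aaaa=bbbb to the snapshot summary
+    snapshot-property.aaaa = "bbbb"
+}
+```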
+
+
diff --git a/versioned_docs/version-2.1.1/connector/sink/InfluxDb.md b/versioned_docs/version-2.1.1/connector/sink/InfluxDb.md
new file mode 100644
index 00000000..c896e8d6
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/InfluxDb.md
@@ -0,0 +1,88 @@
+# InfluxDB
+
+## Description
+
+Write data to InfluxDB.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [ ] Spark
+* [x] Flink: InfluxDB
+
+:::
+
+## Options
+
+| name        | type           | required | default value |
+| ----------- | -------------- | -------- | ------------- |
+| server_url  | `String`       | yes      | -             |
+| username    | `String`       | no       | -             |
+| password    | `String`       | no       | -             |
+| database    | `String`       | yes      | -             |
+| measurement | `String`       | yes      | -             |
+| tags        | `List<String>` | yes      | -             |
+| fields      | `List<String>` | yes      | -             |
+| parallelism | `Int`          | no       | -             |
+
+### server_url [`String`]
+
+The URL of InfluxDB Server.
+
+### username [`String`]
+
+The username of InfluxDB Server.
+
+### password [`String`]
+
+The password of InfluxDB Server.
+
+### database [`String`]
+
+The database name in InfluxDB.
+
+### measurement [`String`]
+
+The Measurement name in InfluxDB.
+
+### tags [`List<String>`]
+
+The list of Tag in InfluxDB.
+
+### fields [`List<String>`]
+
+The list of Field in InfluxDB.
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, for InfluxDbSink
+
+
+## Example
+
+### Simple
+
+```hocon
+InfluxDbSink {
+  server_url = "http://127.0.0.1:8086/"
+  database = "influxdb"
+  measurement = "m"
+  tags = ["country", "city"]
+  fields = ["count"]
+}
+```
+
+### Auth
+
+```hocon
+InfluxDbSink {
+  server_url = "http://127.0.0.1:8086/"
+  username = "admin"
+  password = "password"
+  database = "influxdb"
+  measurement = "m"
+  tags = ["country", "city"]
+  fields = ["count"]
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/Jdbc.mdx b/versioned_docs/version-2.1.1/connector/sink/Jdbc.mdx
new file mode 100644
index 00000000..607d32d0
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Jdbc.mdx
@@ -0,0 +1,189 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Jdbc
+
+## Description
+
+Write data through jdbc
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Jdbc
+* [x] Flink: Jdbc
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name             | type   | required | default value |
+|------------------| ------ |----------|---------------|
+| driver           | string | yes      | -             |
+| url              | string | yes      | -             |
+| user             | string | yes      | -             |
+| password         | string | yes      | -             |
+| dbTable          | string | yes      | -             |
+| saveMode         | string | no       | error         |
+| useSsl           | string | no       | false         |
+| customUpdateStmt | string | no       | -             |
+| duplicateIncs    | string | no       | -             |
+| showSql          | string | no       | true          |
+
+### url [string]
+
+The URL of the JDBC connection, for example: `jdbc:mysql://localhost/dbName`
+
+### user [string]
+
+username
+
+### password [string]
+
+user password
+
+### dbTable [string]
+
+Source data table name
+
+### saveMode [string]
+
+Storage mode. In addition to the basic modes, an `update` mode is supported: when an inserted row hits a key conflict, the data is overwritten in the way specified by the related options below.
+
+The basic modes currently supported are `overwrite` , `append` , `ignore` and `error` . For the specific meaning of each mode, see [save-modes](https://spark.apache.org/docs/latest/sql-programming-guide.html#save-modes)
+
+### useSsl [string]
+
+Effective when `saveMode` is set to `update` ; whether to enable SSL. The default value is `false`
+
+### customUpdateStmt [string]
+
+Effective when `saveMode` is set to `update` ; specifies the update statement template used on key conflicts.
+
+Refer to the `mysql` usage of `INSERT INTO table (...) values (...) ON DUPLICATE KEY UPDATE... ` , and use placeholders or fixed values in `values`
+
+### duplicateIncs [string]
+
+Effective when `saveMode` is set to `update` ; when the specified key conflicts, the column value is updated to the existing value plus the incoming value
+
+### showSql [string]
+
+Effective when `saveMode` is set to `update` ; whether to print the generated SQL
+
+</TabItem>
+<TabItem value="flink">
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| driver            | string | yes      | -             |
+| url               | string | yes      | -             |
+| username          | string | yes      | -             |
+| password          | string | no       | -             |
+| query             | string | yes      | -             |
+| batch_size        | int    | no       | -             |
+| source_table_name | string | yes      | -             |
+| common-options    | string | no       | -             |
+| parallelism       | int    | no       | -             |
+
+### driver [string]
+
+Driver name, such as `com.mysql.cj.jdbc.Driver` for MySQL.
+
+Warning: for license compliance, you have to provide the MySQL JDBC driver yourself, e.g. copy `mysql-connector-java-xxx.jar` to `$FLINK_HOME/lib` for Standalone.
+
+### url [string]
+
+The URL of the JDBC connection. Such as: `jdbc:mysql://localhost:3306/test`
+
+### username [string]
+
+username
+
+### password [string]
+
+password
+
+### query [string]
+
+Insert statement
+
+### batch_size [int]
+
+Number of writes per batch
+
+### parallelism [int]
+
+The parallelism of an individual operator, for JdbcSink.
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+</TabItem>
+</Tabs>
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+jdbc {
+    saveMode = "update",
+    url = "jdbc:mysql://ip:3306/database",
+    user = "userName",
+    password = "***********",
+    dbTable = "tableName",
+    customUpdateStmt = "INSERT INTO table (column1, column2, created, modified, yn) values(?, ?, now(), now(), 1) ON DUPLICATE KEY UPDATE column1 = IFNULL(VALUES (column1), column1), column2 = IFNULL(VALUES (column2), column2)"
+}
+```
+
+> Insert data through JDBC
+
+```bash
+jdbc {
+    saveMode = "update",
+    truncate = "true",
+    url = "jdbc:mysql://ip:3306/database",
+    user = "userName",
+    password = "***********",
+    dbTable = "tableName",
+    customUpdateStmt = "INSERT INTO table (column1, column2, created, modified, yn) values(?, ?, now(), now(), 1) ON DUPLICATE KEY UPDATE column1 = IFNULL(VALUES (column1), column1), column2 = IFNULL(VALUES (column2), column2)"
+    jdbc.connect_timeout = 10000
+    jdbc.socket_timeout = 10000
+}
+```
+> Timeout config
+
+</TabItem>
+<TabItem value="flink">
+
+```conf
+JdbcSink {
+    source_table_name = fake
+    driver = com.mysql.jdbc.Driver
+    url = "jdbc:mysql://localhost/test"
+    username = root
+    query = "insert into test(name,age) values(?,?)"
+    batch_size = 2
+}
+```
+
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/sink/Kafka.md b/versioned_docs/version-2.1.1/connector/sink/Kafka.md
new file mode 100644
index 00000000..bee26c93
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Kafka.md
@@ -0,0 +1,62 @@
+# Kafka
+
+## Description
+
+Write Rows to a Kafka topic.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Kafka
+* [x] Flink: Kafka
+
+:::
+
+## Options
+
+| name                       | type   | required | default value |
+| -------------------------- | ------ | -------- | ------------- |
+| producer.bootstrap.servers | string | yes      | -             |
+| topic                      | string | yes      | -             |
+| producer.*                 | string | no       | -             |
+| semantic                   | string | no       | -             |
+| common-options             | string | no       | -             |
+
+### producer.bootstrap.servers [string]
+
+Kafka Brokers List
+
+### topic [string]
+
+Kafka Topic
+
+### producer [string]
+
+In addition to the above parameters that must be specified by the `Kafka producer` client, the user can also specify multiple non-mandatory parameters for the `producer` client, covering [all the producer parameters specified in the official Kafka document](https://kafka.apache.org/documentation.html#producerconfigs).
+
+The way to specify the parameter is to add the prefix `producer.` to the original parameter name. For example, the way to specify `request.timeout.ms` is: `producer.request.timeout.ms = 60000` . If these non-essential parameters are not specified, they will use the default values given in the official Kafka documentation.
+
+### semantic [string]
+
+The delivery semantic to use: exactly_once / at_least_once / none. The default is at_least_once.
+
+In exactly_once, flink producer will write all messages in a Kafka transaction that will be committed to Kafka on a checkpoint.
+
+In at_least_once, flink producer will wait for all outstanding messages in the Kafka buffers to be acknowledged by the Kafka producer on a checkpoint.
+
+NONE does not provide any guarantees: messages may be lost in case of issues on the Kafka broker and messages may be duplicated in case of a Flink failure.
+
+please refer to [Flink Kafka Fault Tolerance](https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/connectors/datastream/kafka/#fault-tolerance)
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+## Examples
+
+```bash
+kafka {
+    topic = "seatunnel"
+    producer.bootstrap.servers = "localhost:9092"
+}
+```
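+
+A variant that forwards a producer client option through the `producer.` prefix and picks a delivery semantic; the values are only illustrative:
+
+```bash
+kafka {
+    topic = "seatunnel"
+    producer.bootstrap.servers = "localhost:9092"
+    # forwarded to the Kafka producer as request.timeout.ms
+    producer.request.timeout.ms = 60000
+    # one of exactly_once / at_least_once / none
+    semantic = "exactly_once"
+}
+```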
diff --git a/versioned_docs/version-2.1.1/connector/sink/Kudu.md b/versioned_docs/version-2.1.1/connector/sink/Kudu.md
new file mode 100644
index 00000000..c82963ec
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Kudu.md
@@ -0,0 +1,40 @@
+# Kudu
+
+## Description
+
+Write data to Kudu.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Kudu
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| [kudu_master](#kudu_master-string)            | string | yes      | -             |
+| [kudu_table](#kudu_table-string)       | string | yes      | -         |
+| [mode](#mode-string)       | string | no      | insert         |
+
+### kudu_master [string]
+Kudu master, multiple masters are separated by commas
+
+### kudu_table [string]
+The name of the table to be written in kudu, the table must already exist
+
+### mode [string]
+The write mode used in Kudu; supports insert|update|upsert|insertIgnore. The default is insert.
+
+## Example
+
+```bash
+kudu {
+   kudu_master="hadoop01:7051,hadoop02:7051,hadoop03:7051"
+   kudu_table="my_kudu_table"
+   mode="upsert"
+ }
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/MongoDB.md b/versioned_docs/version-2.1.1/connector/sink/MongoDB.md
new file mode 100644
index 00000000..8004acfe
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/MongoDB.md
@@ -0,0 +1,49 @@
+# MongoDB
+
+## Description
+
+Write data to `MongoDB`
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: MongoDB
+* [ ] Flink
+
+:::
+
+## Options
+
+| name                   | type   | required | default value |
+|------------------------| ------ |----------| ------------- |
+| writeconfig.uri        | string | yes      | -             |
+| writeconfig.database   | string | yes      | -             |
+| writeconfig.collection | string | yes      | -             |
+| writeconfig.*          | string | no       | -             |
+
+### writeconfig.uri [string]
+
+The MongoDB URI to write to
+
+### writeconfig.database [string]
+
+The MongoDB database to write to
+
+### writeconfig.collection [string]
+
+The MongoDB collection to write to
+
+### writeconfig.* [string]
+
+More other parameters can be configured here, see [MongoDB Configuration](https://docs.mongodb.com/spark-connector/current/configuration/) for details, see the Output Configuration section. The way to specify parameters is to add a prefix to the original parameter name `writeconfig.` For example, the way to set `localThreshold` is `writeconfig.localThreshold=20` . If you do not specify these optional parameters, the default values of the official MongoDB documentation will be used.
+
+## Examples
+
+```bash
+mongodb {
+    writeconfig.uri = "mongodb://username:password@127.0.0.1:27017/test_db"
+    writeconfig.database = "test_db"
+    writeconfig.collection = "test_collection"
+}
+```
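+
+A variant that passes an extra output option through the `writeconfig.` prefix; `localThreshold` is the example parameter mentioned above and its value is only illustrative:
+
+```bash
+mongodb {
+    writeconfig.uri = "mongodb://username:password@127.0.0.1:27017/test_db"
+    writeconfig.database = "test_db"
+    writeconfig.collection = "test_collection"
+    # forwarded to the MongoDB connector as localThreshold
+    writeconfig.localThreshold = 20
+}
+```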
diff --git a/versioned_docs/version-2.1.1/connector/sink/Phoenix.md b/versioned_docs/version-2.1.1/connector/sink/Phoenix.md
new file mode 100644
index 00000000..0f4c69d6
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Phoenix.md
@@ -0,0 +1,53 @@
+# Phoenix
+
+## Description
+
+Export data to `Phoenix` , compatible with `Kerberos` authentication
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Phoenix
+* [ ] Flink
+
+:::
+
+## Options
+
+| name                      | type    | required | default value |
+| ------------------------- | ------- | -------- | ------------- |
+| zk-connect                | array   | yes      | -             |
+| table                     | string  | yes      | -             |
+| tenantId                  | string  | no       | -             |
+| skipNormalizingIdentifier | boolean | no       | false         |
+| common-options            | string  | no       | -             |
+
+### zk-connect [string]
+
+Connection string, configuration example: `host1:2181,host2:2181,host3:2181 [/znode]`
+
+### table [string]
+
+Target table name
+
+### tenantId [string]
+
+Tenant ID, optional configuration item
+
+### skipNormalizingIdentifier [boolean]
+
+Whether to skip normalizing the identifier: if the column name is surrounded by double quotes, it is used as is; otherwise the name is uppercased. Optional; the default is `false`
+
+### common options [string]
+
+Sink plugin common parameters, please refer to [Sink Plugin](common-options.md) for details
+
+## Examples
+
+```bash
+  Phoenix {
+    zk-connect = "host1:2181,host2:2181,host3:2181"
+    table = "tableName"
+  }
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/Redis.md b/versioned_docs/version-2.1.1/connector/sink/Redis.md
new file mode 100644
index 00000000..48e36574
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Redis.md
@@ -0,0 +1,82 @@
+# Redis
+
+## Description
+
+Write Rows to a Redis.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Redis
+* [ ] Flink
+
+:::
+
+## Options
+
+| name      | type   | required | default value |
+|-----------|--------|----------|---------------|
+| host      | string | no       | "localhost"   |
+| port      | int    | no       | 6379          |
+| auth      | string | no       |               |
+| db_num    | int    | no       | 0             |
+| data_type | string | no       | "KV"          |
+| hash_name | string | no       |               |
+| list_name | string | no       |               |
+| set_name  | string | no       |               |
+| zset_name | string | no       |               |
+| timeout   | int    | no       | 2000          |
+
+### host [string]
+
+Redis server address, default `"localhost"`
+
+### port [int]
+
+Redis service port, default `6379`
+
+### auth [string]
+
+Redis authentication password
+
+### db_num [int]
+
+Redis database index ID. It is connected to db `0` by default
+
+### timeout [int]
+
+Redis timeout, default `2000`
+
+### data_type [string]
+
+Redis data type, one of `KV HASH LIST SET ZSET`
+
+### hash_name [string]
+
+If the Redis data type is HASH, `hash_name` must be configured
+
+### list_name [string]
+
+If the Redis data type is LIST, `list_name` must be configured
+
+### zset_name [string]
+
+If the Redis data type is ZSET, `zset_name` must be configured
+
+### set_name [string]
+
+If the Redis data type is SET, `set_name` must be configured
+
+## Examples
+
+```bash
+redis {
+  host = "localhost"
+  port = 6379
+  auth = "myPassword"
+  db_num = 1
+  data_type = "HASH"
+  hash_name = "test"
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/Tidb.md b/versioned_docs/version-2.1.1/connector/sink/Tidb.md
new file mode 100644
index 00000000..5a658838
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/Tidb.md
@@ -0,0 +1,86 @@
+# TiDb
+
+### Description
+
+Write data to TiDB.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: TiDb
+* [ ] Flink
+
+:::
+
+### Env Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| [spark.tispark.pd.addresses](#spark.tispark.pd.addresses-string)       | string | yes      | -             |
+| [spark.sql.extensions](#spark.sql.extensions-string)        | string | yes      | org.apache.spark.sql.TiExtensions         |
+
+##### spark.tispark.pd.addresses [string]
+
+TiDB Pd Address
+
+##### spark.sql.extensions [string]
+
+Spark Sql Extensions
+
+### Options
+
+| name             | type   | required | default value |
+|------------------| ------ |----------|---------------|
+| [addr](#addr-string)              | string | yes      | -             |
+| [port](#port-string)              | string | yes      | -             |
+| [user](#user-string)             | string | yes      | -             |
+| [password](#password-string)         | string | yes      | -             |
+| [table](#table-string)            | string | yes      | -             |
+| [database](#database-string)        | string | yes       |        |
+
+##### addr [string]
+
+TiDB address, which currently only supports one instance
+
+##### port [string]
+
+TiDB port
+
+##### user [string]
+
+TiDB user
+
+##### password [string]
+
+TiDB password
+
+##### table [string]
+
+TiDB table name
+
+##### database [string]
+
+TiDB database name
+
+##### options
+
+Refer to [TiSpark Configurations](https://github.com/pingcap/tispark/blob/v2.4.1/docs/datasource_api_userguide.md)
+
+### Examples
+
+```bash
+env {
+    spark.tispark.pd.addresses = "127.0.0.1:2379"
+    spark.sql.extensions = "org.apache.spark.sql.TiExtensions"
+}
+
+tidb {
+    addr = "127.0.0.1",
+    port = "4000"
+    database = "database",
+    table = "tableName",
+    user = "userName",
+    password = "***********"
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/sink/common-options.md b/versioned_docs/version-2.1.1/connector/sink/common-options.md
new file mode 100644
index 00000000..620ef3bb
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/sink/common-options.md
@@ -0,0 +1,45 @@
+# Common Options
+
+## Sink Plugin common parameters
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| source_table_name | string | no       | -             |
+
+### source_table_name [string]
+
+When `source_table_name` is not specified, the current plugin processes the data set output by the plugin that precedes it in the configuration file;
+
+When `source_table_name` is specified, the current plugin processes the data set corresponding to this parameter.
+
+## Examples
+
+```bash
+source {
+    FakeSourceStream {
+      result_table_name = "fake"
+      field_name = "name,age"
+    }
+}
+
+transform {
+    sql {
+      source_table_name = "fake"
+      sql = "select name from fake"
+      result_table_name = "fake_name"
+    }
+    sql {
+      source_table_name = "fake"
+      sql = "select age from fake"
+      result_table_name = "fake_age"
+    }
+}
+
+sink {
+    console {
+      source_table_name = "fake_name"
+    }
+}
+```
+
+> If `source_table_name` is not specified, the console outputs the data of the last transform, and if it is set to `fake_name` , it will output the data of `fake_name`
diff --git a/versioned_docs/version-2.1.1/connector/source/Druid.md b/versioned_docs/version-2.1.1/connector/source/Druid.md
new file mode 100644
index 00000000..fc5e6485
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Druid.md
@@ -0,0 +1,65 @@
+# Druid
+
+## Description
+
+Read data from Apache Druid.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [ ] Spark
+* [x] Flink: Druid
+
+:::
+
+## Options
+
+| name       | type           | required | default value |
+| ---------- | -------------- | -------- | ------------- |
+| jdbc_url   | `String`       | yes      | -             |
+| datasource | `String`       | yes      | -             |
+| start_date | `String`       | no       | -             |
+| end_date   | `String`       | no       | -             |
+| columns    | `List<String>` | no       | `*`           |
+| parallelism      | `Int`    | no       | -             |
+
+### jdbc_url [`String`]
+
+The URL of JDBC of Apache Druid.
+
+### datasource [`String`]
+
+The DataSource name in Apache Druid.
+
+### start_date [`String`]
+
+The start date of DataSource, for example, `'2016-06-27'`, `'2016-06-27 00:00:00'`, etc.
+
+### end_date [`String`]
+
+The end date of DataSource, for example, `'2016-06-28'`, `'2016-06-28 00:00:00'`, etc.
+
+### columns [`List<String>`]
+
+The columns of the DataSource that you want to query.
+
+### common options [string]
+
+Source Plugin common parameters, refer to [Source Plugin](common-options.mdx) for details
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, for DruidSource
+
+## Example
+
+```hocon
+DruidSource {
+  jdbc_url = "jdbc:avatica:remote:url=http://localhost:8082/druid/v2/sql/avatica/"
+  datasource = "wikipedia"
+  start_date = "2016-06-27 00:00:00"
+  end_date = "2016-06-28 00:00:00"
+  columns = ["flags","page"]
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/source/Elasticsearch.md b/versioned_docs/version-2.1.1/connector/source/Elasticsearch.md
new file mode 100644
index 00000000..4a68ab54
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Elasticsearch.md
@@ -0,0 +1,62 @@
+# Elasticsearch
+
+## Description
+
+Read data from Elasticsearch
+
+:::tip 
+
+Engine Supported and plugin name
+
+* [x] Spark: Elasticsearch
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| hosts          | array  | yes      | -             |
+| index          | string | yes      |               |
+| es.*           | string | no       |               |
+| common-options | string | yes      | -             |
+
+### hosts [array]
+
+ElasticSearch cluster address, the format is host:port, allowing multiple hosts to be specified. Such as `["host1:9200", "host2:9200"]` .
+
+### index [string]
+
+Elasticsearch index name; supports `*` fuzzy matching
+
+### es.* [string]
+
+Users can also specify multiple optional parameters. For a detailed list of parameters, see [Parameters Supported by Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html#cfg-mapping).
+
+For example, the way to specify `es.read.metadata` is: `es.read.metadata = true` . If these non-essential parameters are not specified, they will use the default values given in the official documentation.
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Examples
+
+```bash
+elasticsearch {
+    hosts = ["localhost:9200"]
+    index = "seatunnel-20190424"
+    result_table_name = "my_dataset"
+}
+```
+
+```bash
+elasticsearch {
+    hosts = ["localhost:9200"]
+    index = "seatunnel-*"
+    es.read.field.include = "name, age"
+    result_table_name = "my_dataset"
+}
+```
+
+> Matches all indexes starting with `seatunnel-` , and only reads the two fields `name` and `age` .
diff --git a/versioned_docs/version-2.1.1/connector/source/Fake.mdx b/versioned_docs/version-2.1.1/connector/source/Fake.mdx
new file mode 100644
index 00000000..7d963484
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Fake.mdx
@@ -0,0 +1,135 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Fake
+
+## Description
+
+`Fake` is mainly used to conveniently generate user-specified data, which is used as input for functional verification, testing, and performance testing of seatunnel.
+
+:::note
+
+Engine Supported and plugin name
+
+* [x] Spark: Fake, FakeStream
+* [x] Flink: FakeSource, FakeSourceStream
+    * Flink `Fake Source` is mainly used to generate data automatically. The data has only two columns: the first column is of `String` type and contains a random value from `["Gary", "Ricky Huo", "Kid Xiong"]` ; the second column is of `Long` type and contains the current 13-digit timestamp. It is used as input for functional verification and testing of `seatunnel` .
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+:::note
+
+These options are for Spark: `FakeStream` ; Spark: `Fake` does not have any options
+
+:::
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| content        | array  | no       | -             |
+| rate           | number | yes      | -             |
+| common-options | string | yes      | -             |
+
+### content [array]
+
+List of test data strings
+
+### rate [number]
+
+Number of test cases generated per second
+
+</TabItem>
+<TabItem value="flink">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| parallelism    | `Int`  | no       | -             |
+| common-options |`string`| no       | -             |
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, for Fake Source Stream
+
+</TabItem>
+</Tabs>
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+### Fake
+
+```bash
+Fake {
+    result_table_name = "my_dataset"
+}
+```
+
+### FakeStream
+
+```bash
+fakeStream {
+    content = ['name=ricky&age=23', 'name=gary&age=28']
+    rate = 5
+}
+```
+
+The generated data is as follows, with strings randomly extracted from the `content` list
+
+```bash
++-----------------+
+|raw_message      |
++-----------------+
+|name=gary&age=28 |
+|name=ricky&age=23|
++-----------------+
+```
+
+</TabItem>
+<TabItem value="flink">
+
+### FakeSource
+
+```bash
+source {
+    FakeSource {
+        result_table_name = "fake"
+        field_name = "name,age"
+    }
+}
+```
+
+### FakeSourceStream
+
+```bash
+source {
+    FakeSourceStream {
+        result_table_name = "fake"
+        field_name = "name,age"
+    }
+}
+```
+
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/source/FeishuSheet.md b/versioned_docs/version-2.1.1/connector/source/FeishuSheet.md
new file mode 100644
index 00000000..744875d1
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/FeishuSheet.md
@@ -0,0 +1,59 @@
+# Feishu Sheet
+
+## Description
+
+Get data from Feishu sheet
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: FeishuSheet
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value       |
+| ---------------| ------ |----------|---------------------|
+| appId          | string | yes      | -                   |
+| appSecret      | string | yes      | -                   |
+| sheetToken     | string | yes      | -                   |
+| range          | string | no       | all values in sheet |
+| sheetNum       | int    | no       | 1                   |
+| titleLineNum   | int    | no       | 1                   |
+| ignoreTitleLine| bool   | no       | true                |
+
+* appId and appSecret
+  * These two parameters need to be obtained from the Feishu open platform.
+  * Also enable the sheet permission in the permission management tab.
+* sheetToken
+  * If your Feishu sheet link is https://xxx.feishu.cn/sheets/shtcnGxninxxxxxxx, then "shtcnGxninxxxxxxx" is the sheetToken.
+* range
+  * The format is A1:D5 or A2:C4 and so on.
+* sheetNum
+  * To import the first sheet, set it to 1, which is also the default value.
+  * To import the second sheet, set it to 2.
+* titleLineNum
+  * By default, the title line is the first line.
+  * If your title line is not the first line, change this number accordingly, e.g. 2, 3 or 5.
+* ignoreTitleLine
+  * The title line is not saved to the data by default; to keep it in the data, set this value to false. See the sketch after this list for how these options fit together.
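+
+A sketch showing how the optional sheet-selection options fit together; all credential values are placeholders:
+
+```bash
+    FeishuSheet {
+        result_table_name = "my_dataset"
+        # placeholder credentials
+        appId = "cli_xxxxxxxx"
+        appSecret = "xxxxxxxxxxxxxxxx"
+        sheetToken = "shtcnxxxxxxxxxxxx"
+        # read the second sheet; the title line is the first line and is not kept in the data
+        sheetNum = 2
+        titleLineNum = 1
+        ignoreTitleLine = true
+    }
+```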
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+```bash
+    FeishuSheet {
+        result_table_name = "my_dataset"
+        appId = "cli_a2cbxxxxxx"
+        appSecret = "IvhtW7xxxxxxxxxxxxxxx"
+        sheetToken = "shtcn6K3DIixxxxxxxxxxxx"
+        # range = "A1:D4"
+    }
+```
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/source/File.mdx b/versioned_docs/version-2.1.1/connector/source/File.mdx
new file mode 100644
index 00000000..21487873
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/File.mdx
@@ -0,0 +1,124 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# File
+
+## Description
+
+Read data from a local or HDFS file.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: File
+* [x] Flink: File
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| format | string | no | json |
+| path | string | yes | - |
+| common-options| string | yes | - |
+
+##### format [string]
+
+Format for reading files, currently supports text, parquet, json, orc, csv.
+
+</TabItem>
+<TabItem value="flink">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| format.type    | string | yes      | -             |
+| path           | string | yes      | -             |
+| schema         | string | yes      | -             |
+| common-options | string | no       | -             |
+| parallelism    | int    | no       | -             |
+
+### format.type [string]
+
+The format for reading files from the file system, currently supports `csv` , `json` , `parquet` , `orc` and `text` .
+
+### schema [string]
+
+- csv
+    - The `schema` of `csv` is a `jsonArray` string, such as `"[{\"type\":\"long\"},{\"type\":\"string\"}]"` . This can only specify the field types, not the field names, so the common configuration parameter `field_name` is usually also required.
+- json
+    - The `schema` parameter of `json` is a `json string` of the original data; the `schema` can be generated from it automatically, but the sample must contain the most complete data possible, otherwise fields will be lost.
+- parquet
+    - The `schema` of `parquet` is an `Avro schema string` , such as `{\"type\":\"record\",\"name\":\"test\",\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"string\"}]}` .
+- orc
+    - The `schema` of `orc` is an `orc schema` string, such as `"struct<name:string,addresses:array<struct<street:string,zip:smallint>>>"` .
+- text
+    - The `schema` of `text` can simply be `string` .
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, for FileSource
+
+</TabItem>
+</Tabs>
+
+##### path [string]
+
+- If reading data from HDFS, the file path should start with `hdfs://`
+- If reading data from a local file system, the file path should start with `file://`
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```
+file {
+    path = "hdfs:///var/logs"
+    result_table_name = "access_log"
+}
+```
+
+```
+file {
+    path = "file:///var/logs"
+    result_table_name = "access_log"
+}
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+    FileSource{
+    path = "hdfs://localhost:9000/input/"
+    format.type = "json"
+    schema = "{\"data\":[{\"a\":1,\"b\":2},{\"a\":3,\"b\":4}],\"db\":\"string\",\"q\":{\"s\":\"string\"}}"
+    result_table_name = "test"
+}
+```
+
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/source/Hbase.md b/versioned_docs/version-2.1.1/connector/source/Hbase.md
new file mode 100644
index 00000000..a96c7852
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Hbase.md
@@ -0,0 +1,44 @@
+# HBase
+
+## Description
+
+Get data from HBase
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: HBase
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| hbase.zookeeper.quorum | string | yes      |               |
+| catalog                | string | yes      |               |
+| common-options| string | yes | - |
+
+### hbase.zookeeper.quorum [string]
+
+The address of the `zookeeper` cluster, the format is: `host01:2181,host02:2181,host03:2181`
+
+### catalog [string]
+
+The structure of the `hbase` table is defined by the `catalog` (the `hbase table catalog`): it specifies the `hbase` table name and its `namespace` , which `columns` are used as the `rowkey` , and the mapping between `column family` and `columns` .
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+```bash
+  Hbase {
+    hbase.zookeeper.quorum = "localhost:2181"
+    catalog = "{\"table\":{\"namespace\":\"default\", \"name\":\"test\"},\"rowkey\":\"id\",\"columns\":{\"id\":{\"cf\":\"rowkey\", \"col\":\"id\", \"type\":\"string\"},\"a\":{\"cf\":\"f1\", \"col\":\"a\", \"type\":\"string\"},\"b\":{\"cf\":\"f1\", \"col\":\"b\", \"type\":\"string\"},\"c\":{\"cf\":\"f1\", \"col\":\"c\", \"type\":\"string\"}}}"
+    result_table_name = "my_dataset"
+  }
+```
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/source/Hive.md b/versioned_docs/version-2.1.1/connector/source/Hive.md
new file mode 100644
index 00000000..9fd71abe
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Hive.md
@@ -0,0 +1,64 @@
+# Hive
+
+## Description
+
+Get data from hive
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Hive
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| pre_sql        | string | yes      | -             |
+| common-options | string | yes      | -             |
+
+### pre_sql [string]
+
+The preprocessing `sql` statement; if preprocessing is not required, you can use `select * from hive_db.hive_table` .
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+**Note: The following configuration must be done to use hive source:**
+
+```bash
+# In the spark section in the seatunnel configuration file:
+
+env {
+  ...
+  spark.sql.catalogImplementation = "hive"
+  ...
+}
+```
+
+## Example
+
+```bash
+env {
+  ...
+  spark.sql.catalogImplementation = "hive"
+  ...
+}
+
+source {
+  hive {
+    pre_sql = "select * from mydb.mytb"
+    result_table_name = "myTable"
+  }
+}
+
+...
+```
+
+## Notes
+
+It must be ensured that the hive `metastore` is in service; start it with `hive --service metastore` (default port 9083). In `cluster` , `client` and `local` modes, `hive-site.xml` must be placed in the `$HADOOP_CONF` directory of the task submission node (or placed under `$SPARK_HOME/conf` ); for local debugging in an IDE, put it in the `resources` directory
diff --git a/versioned_docs/version-2.1.1/connector/source/Http.md b/versioned_docs/version-2.1.1/connector/source/Http.md
new file mode 100644
index 00000000..84b74c3d
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Http.md
@@ -0,0 +1,61 @@
+# Http
+
+## Description
+
+Get data from http or https interface
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Http
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------ |
+| url            | string | yes      | -            |
+| method         | string | no       | GET          |
+| header         | string | no       |              |
+| request_params | string | no       |              |
+| sync_path      | string | no       |              |
+
+### url [string]
+
+HTTP request path, starting with http:// or https://.
+
+### method [string]
+
+HTTP request method, GET or POST, default GET.
+
+### header [string]
+
+HTTP request header, in JSON format.
+
+### request_params [string]
+
+HTTP request parameters, in JSON format.
+
+### sync_path [string]
+
+For multiple HTTP requests, the storage path (on HDFS) of the parameters used for synchronization.
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details.
+
+## Example
+
+```bash
+ Http {
+    url = "http://date.jsontest.com/"
+    result_table_name= "response_body"
+   }
+```
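+
+A POST sketch with a header and request parameters; the JSON strings below are hypothetical and only illustrate the expected format:
+
+```bash
+ Http {
+    url = "http://date.jsontest.com/"
+    method = "POST"
+    # JSON-formatted header and request parameters (hypothetical values)
+    header = "{\"Content-Type\":\"application/json\"}"
+    request_params = "{\"id\":\"1\"}"
+    result_table_name = "response_body"
+   }
+```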
+
+## Notes
+
+Whether the synchronization parameters need to be updated is determined by the result of the HTTP call; this judgment is made outside the http source plugin, and the updated parameters are then written to HDFS through the hdfs sink plugin.
diff --git a/versioned_docs/version-2.1.1/connector/source/Hudi.md b/versioned_docs/version-2.1.1/connector/source/Hudi.md
new file mode 100644
index 00000000..31487726
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Hudi.md
@@ -0,0 +1,76 @@
+# Hudi
+
+## Description
+
+Read data from Hudi.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Hudi
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| [hoodie.datasource.read.paths](#hoodiedatasourcereadpaths) | string | yes      | -             |
+| [hoodie.file.index.enable](#hoodiefileindexenable)  | boolean | no      | -             |
+| [hoodie.datasource.read.end.instanttime](#hoodiedatasourcereadendinstanttime)          | string | no      | -             |
+| [hoodie.datasource.write.precombine.field](#hoodiedatasourcewriteprecombinefield)            | string | no      | -             |
+| [hoodie.datasource.read.incr.filters](#hoodiedatasourcereadincrfilters)       | string | no      | -             |
+| [hoodie.datasource.merge.type](#hoodiedatasourcemergetype)  | string | no      | -             |
+| [hoodie.datasource.read.begin.instanttime](#hoodiedatasourcereadbegininstanttime)            | string | no      | -             |
+| [hoodie.enable.data.skipping](#hoodieenabledataskipping)   | string | no      | -             |
+| [as.of.instant](#asofinstant)    | string | no      | -             |
+| [hoodie.datasource.query.type](#hoodiedatasourcequerytype)         | string | no      | -             |
+| [hoodie.datasource.read.schema.use.end.instanttime](#hoodiedatasourcereadschemauseendinstanttime)      | string | no      | -             |
+
+Refer to [hudi read options](https://hudi.apache.org/docs/configurations/#Read-Options) for configurations.
+
+### hoodie.datasource.read.paths
+
+Comma separated list of file paths to read within a Hudi table.
+
+### hoodie.file.index.enable
+Enables use of the spark file index implementation for Hudi, that speeds up listing of large tables.
+
+### hoodie.datasource.read.end.instanttime
+Instant time to limit incrementally fetched data to. New data written with an instant_time <= END_INSTANTTIME are fetched out.
+
+### hoodie.datasource.write.precombine.field
+Field used in preCombining before actual write. When two records have the same key value, we will pick the one with the largest value for the precombine field, determined by Object.compareTo(..)
+
+### hoodie.datasource.read.incr.filters
+For use-cases like DeltaStreamer which reads from Hoodie Incremental table and applies opaque map functions, filters appearing late in the sequence of transformations cannot be automatically pushed down. This option allows setting filters directly on Hoodie Source.
+
+### hoodie.datasource.merge.type
+For Snapshot query on merge on read table, control whether we invoke the record payload implementation to merge (payload_combine) or skip merging altogether (skip_merge)
+
+### hoodie.datasource.read.begin.instanttime
+Instant time to start incrementally pulling data from. The instanttime here need not necessarily correspond to an instant on the timeline. New data written with an instant_time > BEGIN_INSTANTTIME are fetched out. For e.g: ‘20170901080000’ will get all new data written after Sep 1, 2017 08:00AM.
+
+### hoodie.enable.data.skipping
+enable data skipping to boost query after doing z-order optimize for current table
+
+### as.of.instant
+The query instant for time travel. If this option is not specified, we query the latest snapshot.
+
+### hoodie.datasource.query.type
+Whether data needs to be read, in incremental mode (new data since an instantTime) (or) Read Optimized mode (obtain latest view, based on base files) (or) Snapshot mode (obtain latest view, by merging base and (if any) log files)
+
+### hoodie.datasource.read.schema.use.end.instanttime
+Uses the end instant schema when data is fetched incrementally. Default: uses the latest instant schema.
+
+## Example
+
+```bash
+hudi {
+    hoodie.datasource.read.paths = "hdfs://"
+}
+```
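+
+A sketch of an incremental read using the options above; the path and instant times are placeholders:
+
+```bash
+hudi {
+    # placeholder table path
+    hoodie.datasource.read.paths = "hdfs://namenode:8020/hudi/seatunnel_hudi"
+    hoodie.datasource.query.type = "incremental"
+    # placeholder instant times
+    hoodie.datasource.read.begin.instanttime = "20220101000000"
+    hoodie.datasource.read.end.instanttime = "20220102000000"
+}
+```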
+
+
diff --git a/versioned_docs/version-2.1.1/connector/source/Iceberg.md b/versioned_docs/version-2.1.1/connector/source/Iceberg.md
new file mode 100644
index 00000000..f7b09844
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Iceberg.md
@@ -0,0 +1,59 @@
+# Iceberg
+
+## Description
+
+Read data from Iceberg.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Iceberg
+* [ ] Flink
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| common-options |        | yes      | -             |
+| [path](#path)  | string | yes      | -             |
+| [pre_sql](#pre_sql) | string | yes | -             |
+| [snapshot-id](#snapshot-id) | long | no      | -   |
+| [as-of-timestamp](#as-of-timestamp) | long | no| - |
+
+
+Refer to [iceberg read options](https://iceberg.apache.org/docs/latest/spark-configuration/) for more configurations.
+
+### common-options
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+### path
+
+Iceberg table location.
+
+### pre_sql
+
+The SQL statement used to query the iceberg table. Note that the table name to use in the query is the `result_table_name` configuration value
+
+### snapshot-id
+
+Snapshot ID of the table snapshot to read
+
+### as-of-timestamp
+
+A timestamp in milliseconds; the snapshot used will be the snapshot current at this time.
+
+## Example
+
+```bash
+iceberg {
+    path = "hdfs://localhost:9000/iceberg/warehouse/db/table"
+    result_table_name = "my_source"
+    pre_sql="select * from my_source where dt = '2019-01-01'"
+}
+```
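+
+A time-travel sketch using `as-of-timestamp` ; the timestamp is a placeholder value in milliseconds:
+
+```bash
+iceberg {
+    path = "hdfs://localhost:9000/iceberg/warehouse/db/table"
+    result_table_name = "my_source"
+    pre_sql = "select * from my_source"
+    # read the snapshot that was current at this time (placeholder value)
+    as-of-timestamp = 1648771200000
+}
+```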
+
+
diff --git a/versioned_docs/version-2.1.1/connector/source/InfluxDb.md b/versioned_docs/version-2.1.1/connector/source/InfluxDb.md
new file mode 100644
index 00000000..e57d97e0
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/InfluxDb.md
@@ -0,0 +1,87 @@
+# InfluxDb
+
+## Description
+
+Read data from InfluxDB.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [ ] Spark
+* [x] Flink: InfluxDb
+
+:::
+
+## Options
+
+| name        | type           | required | default value |
+| ----------- | -------------- | -------- | ------------- |
+| server_url  | `String`       | yes      | -             |
+| username    | `String`       | no       | -             |
+| password    | `String`       | no       | -             |
+| database    | `String`       | yes      | -             |
+| measurement | `String`       | yes      | -             |
+| fields      | `List<String>` | yes      | -             |
+| field_types | `List<String>` | yes      | -             |
+| parallelism | `Int`          | no       | -             |
+
+### server_url [`String`]
+
+The URL of InfluxDB Server.
+
+### username [`String`]
+
+The username of InfluxDB Server.
+
+### password [`String`]
+
+The password of InfluxDB Server.
+
+### database [`String`]
+
+The database name in InfluxDB.
+
+### measurement [`String`]
+
+The Measurement name in InfluxDB.
+
+### fields [`List<String>`]
+
+The list of Field in InfluxDB.
+
+### field_types [`List<String>`]
+
+The list of Field Types in InfluxDB.
+
+### parallelism [`Int`]
+
+The parallelism of an individual operator, for InfluxDbSource.
+
+## Example
+
+### Simple
+
+```hocon
+InfluxDbSource {
+  server_url = "http://127.0.0.1:8086/"
+  database = "influxdb"
+  measurement = "m"
+  fields = ["time", "temperature"]
+  field_types = ["STRING", "DOUBLE"]
+}
+```
+
+### Auth
+
+```hocon
+InfluxDbSource {
+  server_url = "http://127.0.0.1:8086/"
+  username = "admin"
+  password = "password"
+  database = "influxdb"
+  measurement = "m"
+  fields = ["time", "temperature"]
+  field_types = ["STRING", "DOUBLE"]
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/source/Jdbc.mdx b/versioned_docs/version-2.1.1/connector/source/Jdbc.mdx
new file mode 100644
index 00000000..ca92c1e3
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Jdbc.mdx
@@ -0,0 +1,205 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Jdbc
+
+## Description
+
+Read external data source data through JDBC
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Jdbc
+* [x] Flink: Jdbc
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| driver         | string | yes      | -             |
+| jdbc.*         | string | no       |               |
+| password       | string | yes      | -             |
+| table          | string | yes      | -             |
+| url            | string | yes      | -             |
+| user           | string | yes      | -             |
+| common-options | string | yes      | -             |
+
+</TabItem>
+<TabItem value="flink">
+
+| name                  | type   | required | default value |
+|-----------------------|--------| -------- | ------------- |
+| driver                | string | yes      | -             |
+| url                   | string | yes      | -             |
+| username              | string | yes      | -             |
+| password              | string | no       | -             |
+| query                 | string | yes      | -             |
+| fetch_size            | int    | no       | -             |
+| partition_column      | string | no       | -             |
+| partition_upper_bound | long   | no       | -             |
+| partition_lower_bound | long   | no       | -             |
+| common-options        | string | no       | -             |
+| parallelism           | int    | no       | -             |
+
+</TabItem>
+</Tabs>
+
+### driver [string]
+
+The `jdbc class name` used to connect to the remote data source, if you use MySQL the value is `com.mysql.cj.jdbc.Driver`.
+
+Warning: for license compliance, you have to provide the MySQL JDBC driver yourself, e.g. copy `mysql-connector-java-xxx.jar` to `$FLINK_HOME/lib` for Standalone.
+
+### password [string]
+
+password
+
+### url [string]
+
+The URL of the JDBC connection, for example: `jdbc:postgresql://localhost/test`
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+### jdbc [string]
+
+In addition to the parameters that must be specified above, users can also specify multiple optional parameters, which cover [all the parameters](https://spark.apache.org/docs/latest/sql-programming-guide.html#jdbc-to-other-databases) provided by Spark JDBC.
+
+The way to specify parameters is to add the prefix `jdbc.` to the original parameter name. For example, the way to specify `fetchsize` is: `jdbc.fetchsize = 50000` . If these non-essential parameters are not specified, they will use the default values given by Spark JDBC.
+
+### user [string]
+
+username
+
+### table [string]
+
+table name
+
+</TabItem>
+<TabItem value="flink">
+
+### username [string]
+
+username
+
+### query [string]
+
+Query statement
+
+### fetch_size [int]
+
+fetch size
+
+### parallelism [int]
+
+The parallelism of an individual operator, for JdbcSource.
+
+### partition_column [string]
+
+The column name used to partition for parallel reads; only numeric types are supported.
+
+### partition_upper_bound [long]
+
+The maximum value of `partition_column` for the scan; if not set, SeaTunnel will query the database to get the maximum value.
+
+### partition_lower_bound [long]
+
+The minimum value of `partition_column` for the scan; if not set, SeaTunnel will query the database to get the minimum value.
+
+</TabItem>
+</Tabs>
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+jdbc {
+    driver = "com.mysql.jdbc.Driver"
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "access"
+    result_table_name = "access_log"
+    user = "username"
+    password = "password"
+}
+```
+
+> Read MySQL data through JDBC
+
+```bash
+jdbc {
+    driver = "com.mysql.jdbc.Driver"
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "access"
+    result_table_name = "access_log"
+    user = "username"
+    password = "password"
+    jdbc.partitionColumn = "item_id"
+    jdbc.numPartitions = "10"
+    jdbc.lowerBound = 0
+    jdbc.upperBound = 100
+}
+```
+
+> Divide partitions based on specified fields
+
+
+```bash
+jdbc {
+    driver = "com.mysql.jdbc.Driver"
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "access"
+    result_table_name = "access_log"
+    user = "username"
+    password = "password"
+    
+    jdbc.connect_timeout = 10000
+    jdbc.socket_timeout = 10000
+}
+```
+> Timeout config
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+JdbcSource {
+    driver = com.mysql.jdbc.Driver
+    url = "jdbc:mysql://localhost/test"
+    username = root
+    query = "select * from test"
+}
+```
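+
+A sketch of a parallel read using the partition options above; the column name and bounds are illustrative:
+
+```bash
+JdbcSource {
+    driver = com.mysql.jdbc.Driver
+    url = "jdbc:mysql://localhost/test"
+    username = root
+    query = "select * from test"
+    fetch_size = 1000
+    parallelism = 4
+    # illustrative numeric partition column and bounds
+    partition_column = "id"
+    partition_lower_bound = 1
+    partition_upper_bound = 10000
+}
+```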
+
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/source/Kafka.mdx b/versioned_docs/version-2.1.1/connector/source/Kafka.mdx
new file mode 100644
index 00000000..adf08fc3
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Kafka.mdx
@@ -0,0 +1,177 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Kafka
+
+## Description
+
+To consume data from `Kafka` , supported `Kafka version >= 0.10.0` .
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: KafkaStream
+* [x] Flink: Kafka
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name                       | type   | required | default value |
+| -------------------------- | ------ | -------- | ------------- |
+| topics                     | string | yes      | -             |
+| consumer.group.id          | string | yes      | -             |
+| consumer.bootstrap.servers | string | yes      | -             |
+| consumer.*                 | string | no       | -             |
+| common-options             | string | yes      | -             |
+
+</TabItem>
+<TabItem value="flink">
+
+| name                       | type   | required | default value |
+| -------------------------- | ------ | -------- | ------------- |
+| topics                     | string | yes      | -             |
+| consumer.group.id          | string | yes      | -             |
+| consumer.bootstrap.servers | string | yes      | -             |
+| schema                     | string | yes      | -             |
+| format.type                | string | yes      | -             |
+| format.*                   | string | no       | -             |
+| consumer.*                 | string | no       | -             |
+| rowtime.field              | string | no       | -             |
+| watermark                  | long   | no       | -             |
+| offset.reset               | string | no       | -             |
+| common-options             | string | no       | -             |
+
+</TabItem>
+</Tabs>
+
+### topics [string]
+
+`Kafka topic` name. If there are multiple `topics`, use `,` to split, for example: `"tpc1,tpc2"`
+
+### consumer.group.id [string]
+
+`Kafka consumer group id`, used to distinguish different consumer groups
+
+### consumer.bootstrap.servers [string]
+
+`Kafka` cluster address, separated by `,`
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+</TabItem>
+<TabItem value="flink">
+
+### format.type [string]
+
+Currently supports three formats
+
+- json
+- csv
+- avro
+
+### format.* [string]
+
+The `csv` format uses this parameter to set the separator and other options. For example, to set the column delimiter to `\t` , use `format.field-delimiter=\\t` .
+
+### schema [string]
+
+- csv
+    - The `schema` of `csv` is a `jsonArray` string, such as `"[{\"field\":\"name\",\"type\":\"string\"},{\"field\":\"age\",\"type\":\"int\"}]"` .
+
+- json
+    - The `schema` parameter of `json` is a `json string` of sample original data, from which the `schema` is generated automatically. Provide a sample with the most complete content, otherwise fields will be lost.
+
+- avro
+    - The `schema` parameter of `avro` is a standard `avro schema JSON string` , such as `{\"name\":\"test\",\"type\":\"record\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"age\",\"type\":\"long\"},{\"name\":\"addrs\",\"type\":{\"name\":\"addrs\",\"type\":\"record\",\"fields\":[{\"name\":\"province\",\"type\":\"string\"},{\"name\":\"city\",\"type\":\"string\"}]}}]}`
+
+- To learn more about how the `Avro Schema JSON string` should be defined, please refer to: https://avro.apache.org/docs/current/spec.html
+
+### rowtime.field [string]
+
+Extract the timestamp from the configured field and use it for the Flink event-time watermark
+
+### watermark [long]
+
+Sets a built-in watermark strategy for `rowtime.field` attributes which are out-of-order by a bounded time interval. Emits watermarks which are the maximum observed timestamp minus the specified delay.
+
+### offset.reset [string]
+
+The consumer's initial `offset` , only valid for new consumers. There are three modes:
+
+- latest
+    - Start consumption from the latest offset
+- earliest
+    - Start consumption from the earliest offset
+- specific
+    - Start consumption from a specified `offset` . In this mode, the `start offset` of each partition must be given, for example `offset.reset.specific="{0:111,1:123}"`
+
+</TabItem>
+</Tabs>
+
+### consumer.* [string]
+
+In addition to the above necessary parameters that must be specified by the `Kafka consumer` client, users can also specify multiple `consumer` client non-mandatory parameters, covering [all consumer parameters specified in the official Kafka document](https://kafka.apache.org/documentation.html#consumerconfigs).
+
+The way to specify parameters is to add the prefix `consumer.` to the original parameter name. For example, the way to specify `auto.offset.reset` is: `consumer.auto.offset.reset = latest` . If these non-essential parameters are not specified, they will use the default values given in the official Kafka documentation.
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+kafkaStream {
+    topics = "seatunnel"
+    consumer.bootstrap.servers = "localhost:9092"
+    consumer.group.id = "seatunnel_group"
+}
+```
+
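+A minimal sketch showing how the `consumer.*` pass-through parameters described above can be added; the tuning values are assumptions for illustration:
+
+```bash
+kafkaStream {
+    topics = "seatunnel"
+    consumer.bootstrap.servers = "localhost:9092"
+    consumer.group.id = "seatunnel_group"
+    # non-mandatory Kafka consumer parameters, passed through with the `consumer.` prefix
+    consumer.auto.offset.reset = "latest"
+    consumer.max.poll.records = "500"
+}
+```
+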
+</TabItem>
+<TabItem value="flink">
+
+```bash
+KafkaTableStream {
+    consumer.bootstrap.servers = "127.0.0.1:9092"
+    consumer.group.id = "seatunnel5"
+    topics = test
+    result_table_name = test
+    format.type = csv
+    schema = "[{\"field\":\"name\",\"type\":\"string\"},{\"field\":\"age\",\"type\":\"int\"}]"
+    format.field-delimiter = ";"
+    format.allow-comments = "true"
+    format.ignore-parse-errors = "true"
+}
+```
+
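+A minimal sketch showing the `offset.reset` option described above added to the Flink Kafka source; the group id and offset mode are assumptions for illustration:
+
+```bash
+KafkaTableStream {
+    consumer.bootstrap.servers = "127.0.0.1:9092"
+    consumer.group.id = "seatunnel_new_group"
+    topics = test
+    result_table_name = test
+    format.type = csv
+    schema = "[{\"field\":\"name\",\"type\":\"string\"},{\"field\":\"age\",\"type\":\"int\"}]"
+    format.field-delimiter = ";"
+    # for a new consumer group, start consumption from the earliest offset
+    offset.reset = earliest
+}
+```
+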
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/source/Kudu.md b/versioned_docs/version-2.1.1/connector/source/Kudu.md
new file mode 100644
index 00000000..6cef5235
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Kudu.md
@@ -0,0 +1,43 @@
+# Kudu
+
+## Description
+
+Read data from Kudu.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Kudu
+* [ ] Flink
+
+:::
+
+## Options
+
+| name        | type   | required | default value |
+| ----------- | ------ | -------- | ------------- |
+| kudu_master | string | yes      | -             |
+| kudu_table  | string | yes      | -             |
+
+### kudu_master [string]
+
+The address of the Kudu master, e.g. `master:7051`
+
+### kudu_table [string]
+
+The name of the Kudu table to read
+
+### common options [string]
+
+Source Plugin common parameters, refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+```bash
+kudu {
+    kudu_master = "master:7051"
+    kudu_table = "impala::test_db.test_table"
+    result_table_name = "kudu_result_table"
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/source/MongoDB.md b/versioned_docs/version-2.1.1/connector/source/MongoDB.md
new file mode 100644
index 00000000..5e7aba35
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/MongoDB.md
@@ -0,0 +1,62 @@
+# MongoDb
+
+## Description
+
+Read data from MongoDB.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: MongoDb
+* [ ] Flink
+
+:::
+
+## Options
+
+| name                  | type   | required | default value |
+|-----------------------| ------ |----------|---------------|
+| readconfig.uri        | string | yes      | -             |
+| readconfig.database   | string | yes      | -             |
+| readconfig.collection | string | yes      | -             |
+| readconfig.*          | string | no       | -             |
+| schema                | string | no       | -             |
+| common-options        | string | yes      | -             |
+
+### readconfig.uri [string]
+
+MongoDB uri
+
+### readconfig.database [string]
+
+MongoDB database
+
+### readconfig.collection [string]
+
+MongoDB collection
+
+### readconfig.* [string]
+
+Other parameters can be configured here; see the Input Configuration section of [MongoDB Configuration](https://docs.mongodb.com/spark-connector/current/configuration/) for details. The way to specify parameters is to add the prefix `readconfig.` to the original parameter name. For example, the way to set `spark.mongodb.input.partitioner` is `readconfig.spark.mongodb.input.partitioner="MongoPaginateBySizePartitioner"` . If these optional parameters are not specified, the default values from the official MongoDB documentation will be used.
+
+### schema [string]
+
+Because `MongoDB` does not have the concept of a `schema` , when Spark reads `MongoDB` it samples the data and infers the `schema` . In practice, this process can be slow and inaccurate; you can specify the `schema` manually to avoid these problems. `schema` is a `json` string, such as `{\"name\":\"string\",\"age\":\"integer\",\"addrs\":{\"country\":\"string\",\"city\":\"string\"}}`
+
+### common options [string]
+
+Source Plugin common parameters, refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+```bash
+mongodb {
+    readconfig.uri = "mongodb://username:password@127.0.0.1:27017/mypost"
+    readconfig.database = "mydatabase"
+    readconfig.collection = "mycollection"
+    readconfig.spark.mongodb.input.partitioner = "MongoPaginateBySizePartitioner"
+    schema="{\"name\":\"string\",\"age\":\"integer\",\"addrs\":{\"country\":\"string\",\"city\":\"string\"}}"
+    result_table_name = "mongodb_result_table"
+}
+```
diff --git a/versioned_docs/version-2.1.1/connector/source/Phoenix.md b/versioned_docs/version-2.1.1/connector/source/Phoenix.md
new file mode 100644
index 00000000..0232b040
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Phoenix.md
@@ -0,0 +1,58 @@
+# Phoenix
+
+## Description
+
+Read data from an external data source through `Phoenix` , compatible with `Kerberos` authentication
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Phoenix
+* [ ] Flink
+
+:::
+
+## Options
+
+| name       | type   | required | default value |
+| ---------- | ------ | -------- | ------------- |
+| zk-connect | string | yes      | -             |
+| table      | string | yes      |               |
+| columns    | string | no       | -             |
+| tenantId   | string | no       | -             |
+| predicate  | string | no       | -             |
+
+### zk-connect [string]
+
+Connection string, configuration example: `host1:2181,host2:2181,host3:2181 [/znode]`
+
+### table [string]
+
+Source data table name
+
+### columns [string-list]
+
+The columns to read. Set to `[]` to read all columns; optional, default `[]` .
+
+### tenantId [string]
+
+Tenant ID, optional configuration item
+
+### predicate [string]
+
+Conditional filter string, optional configuration item
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+```bash
+Phoenix {
+  zk-connect = "host1:2181,host2:2181,host3:2181"
+  table = "table22"
+  result_table_name = "tmp1"
+}
+```
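+
+A sketch that also sets the optional parameters described above; the column names and predicate value are assumptions for illustration:
+
+```bash
+Phoenix {
+  zk-connect = "host1:2181,host2:2181,host3:2181"
+  table = "table22"
+  # read only these columns; [] (the default) reads all columns
+  columns = ["name", "age"]
+  # push a filter condition down to Phoenix
+  predicate = "age > 18"
+  result_table_name = "tmp1"
+}
+```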
diff --git a/versioned_docs/version-2.1.1/connector/source/Redis.md b/versioned_docs/version-2.1.1/connector/source/Redis.md
new file mode 100644
index 00000000..adb767ea
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Redis.md
@@ -0,0 +1,87 @@
+# Redis
+
+## Description
+
+Read data from Redis.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Redis
+* [ ] Flink
+
+:::
+
+## Options
+
+| name                | type     | required | default value |
+|---------------------|----------|----------|---------------|
+| host                | string   | no       | "localhost"   |
+| port                | int      | no       | 6379          |
+| auth                | string   | no       |               |
+| db_num              | int      | no       | 0             |
+| keys_or_key_pattern | string   | yes      |               |
+| partition_num       | int      | no       | 3             |
+| data_type           | string   | no       | "KV"          |
+| timeout             | int      | no       | 2000          |
+| common-options      | string   | yes      |               |
+
+### host [string]
+
+Redis server address, default `"localhost"`
+
+### port [int]
+
+Redis service port, default `6379`
+
+### auth [string]
+
+Redis authentication password
+
+### db_num [int]
+
+Redis database index ID. It is connected to db `0` by default
+
+### keys_or_key_pattern [string]
+
+Redis key; fuzzy matching with key patterns is supported
+
+### partition_num [int]
+
+Number of Redis shards. The default is `3`
+
+### data_type [string]
+
+Redis data type, e.g. `KV HASH LIST SET ZSET`
+
+### timeout [int]
+
+Redis connection timeout, default `2000`
+
+### common options [string]
+
+Source Plugin common parameters, refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+```bash
+redis {
+  host = "localhost"
+  port = 6379
+  auth = "myPassword"
+  db_num = 1
+  keys_or_key_pattern = "*"
+  partition_num = 20
+  data_type = "HASH"
+  result_table_name = "hash_result_table"
+}
+```
+
+> The returned table has two string fields, `raw_key` and `raw_message` :
+
+| raw_key   | raw_message |
+| --------- | ----------- |
+| keys      | xxx         |
+| my_keys   | xxx         |
+| keys_mine | xxx         |
diff --git a/versioned_docs/version-2.1.1/connector/source/Socket.mdx b/versioned_docs/version-2.1.1/connector/source/Socket.mdx
new file mode 100644
index 00000000..86d6fafe
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Socket.mdx
@@ -0,0 +1,102 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Socket
+
+## Description
+
+`SocketStream` is mainly used to receive `Socket` data and to quickly verify `Spark streaming` computation.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: SocketStream
+* [x] Flink: Socket
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| host           | string | no       | localhost     |
+| port           | number | no       | 9999          |
+| common-options | string | yes      | -             |
+
+### host [string]
+
+socket server hostname
+
+### port [number]
+
+socket server port
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+</TabItem>
+<TabItem value="flink">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| host           | string | no       | localhost     |
+| port           | int    | no       | 9999          |
+| common-options | string | no       | -             |
+
+### host [string]
+
+socket server hostname
+
+### port [int]
+
+socket server port
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details
+
+</TabItem>
+</Tabs>
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+socketStream {
+  port = 9999
+}
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+source {
+    SocketStream{
+        result_table_name = "socket"
+        field_name = "info"
+    }
+}
+```
+
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/connector/source/Tidb.md b/versioned_docs/version-2.1.1/connector/source/Tidb.md
new file mode 100644
index 00000000..fd08f9c4
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Tidb.md
@@ -0,0 +1,66 @@
+# Tidb
+
+## Description
+
+Read data from Tidb.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Tidb
+* [ ] Flink
+
+:::
+
+## Env Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| [spark.tispark.pd.addresses](#spark.tispark.pd.addresses-string)       | string | yes      | -             |
+| [spark.sql.extensions](#spark.sql.extensions-string)        | string | yes      | org.apache.spark.sql.TiExtensions         |
+
+### spark.tispark.pd.addresses [string]
+
+The addresses of your TiDB PD (Placement Driver) servers
+
+### spark.sql.extensions [string]
+
+Default value: `org.apache.spark.sql.TiExtensions`
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| [database](#database-string)       | string | yes      | -             |
+| [pre_sql](#pre_sql-string)        | string | yes      | -         |
+
+### database [string]
+
+TiDB database name
+
+### pre_sql [string]
+
+SQL query used to read data
+
+### common options [string]
+
+Source Plugin common parameters, refer to [Source Plugin](common-options.mdx) for details
+
+## Example
+
+```bash
+env {
+    spark.tispark.pd.addresses = "192.168.0.1:2379"
+    spark.sql.extensions = "org.apache.spark.sql.TiExtensions"
+}
+
+source {
+    tidb {
+        database = "test"
+        pre_sql = "select * from table1"
+    }
+}
+
+```
+
diff --git a/versioned_docs/version-2.1.1/connector/source/Webhook.md b/versioned_docs/version-2.1.1/connector/source/Webhook.md
new file mode 100644
index 00000000..d669ad52
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/Webhook.md
@@ -0,0 +1,42 @@
+# Webhook
+
+## Description
+
+Provides an HTTP interface for pushing data; only POST requests are supported.
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Webhook
+* [ ] Flink
+
+:::
+
+## Options
+
+| name | type   | required | default value |
+| ---- | ------ | -------- | ------------- |
+| port | int    | no       | 9999          |
+| path | string | no       | /             |
+
+### port [int]
+
+Port for push requests, default 9999.
+
+### path [string]
+
+Push request path, default "/".
+
+### common options [string]
+
+Source plugin common parameters, please refer to [Source Plugin](common-options.mdx) for details.
+
+## Example
+
+```bash
+Webhook {
+      result_table_name = "request_body"
+   }
+```
+
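+A sketch that also sets the optional `port` and `path` options described above; the path value is an assumption for illustration:
+
+```bash
+Webhook {
+    port = 9999
+    path = "/seatunnel"
+    result_table_name = "request_body"
+}
+```
+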
diff --git a/versioned_docs/version-2.1.1/connector/source/common-options.mdx b/versioned_docs/version-2.1.1/connector/source/common-options.mdx
new file mode 100644
index 00000000..22ade54f
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/common-options.mdx
@@ -0,0 +1,89 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Common Options
+
+## Source common parameters
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| result_table_name | string | yes      | -             |
+| table_name        | string | no       | -             |
+
+### result_table_name [string]
+
+When `result_table_name` is not specified, the data processed by this plugin will not be registered as a data set that other plugins can access directly, also called a temporary table (table);
+
+When `result_table_name` is specified, the data processed by this plugin will be registered as a data set (dataset) that other plugins can access directly, also called a temporary table (table). The dataset registered here can be accessed directly by other plugins by specifying `source_table_name` .
+
+### table_name [string]
+
+[Deprecated since v1.4] The function is the same as `result_table_name` , this parameter will be deleted in subsequent Release versions, and `result_table_name`  parameter is recommended.
+
+</TabItem>
+<TabItem value="flink">
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| result_table_name | string | no       | -             |
+| field_name        | string | no       | -             |
+
+### result_table_name [string]
+
+When `result_table_name` is not specified, the data processed by this plugin will not be registered as a data set `(dataStream/dataset)` that other plugins can access directly, also called a temporary table `(table)` ;
+
+When `result_table_name` is specified, the data processed by this plugin will be registered as a data set `(dataStream/dataset)` that other plugins can access directly, also called a temporary table `(table)` . The data set `(dataStream/dataset)` registered here can be accessed directly by other plugins by specifying `source_table_name` .
+
+### field_name [string]
+
+When data is obtained from the upstream plugin, you can specify the names of the obtained fields, which is convenient for use in subsequent SQL plugins.
+
+</TabItem>
+</Tabs>
+
+## Example
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+fake {
+    result_table_name = "view_table"
+}
+```
+
+> The result of the data source `fake` will be registered as a temporary table named `view_table` . This temporary table can be used by any Filter or Output plugin by specifying `source_table_name` .
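+
+A minimal sketch, assuming the `sql` transform, of how a downstream plugin consumes the registered temporary table via `source_table_name` ; the SQL statement is an assumption for illustration:
+
+```bash
+fake {
+    result_table_name = "view_table"
+}
+
+sql {
+    source_table_name = "view_table"
+    sql = "select * from view_table where age >= 20"
+    result_table_name = "adult_view_table"
+}
+```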
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+source {
+    FakeSourceStream {
+        result_table_name = "fake"
+        field_name = "name,age"
+    }
+}
+```
+
+> The result of the data source `FakeSourceStream` will be registered as a temporary table named `fake` . This temporary table can be used by any `Transform` or `Sink` plugin by specifying `source_table_name` .
+>
+> `field_name` names the two columns of the temporary table `name` and `age` respectively.
+
+</TabItem>
+</Tabs>
diff --git a/versioned_docs/version-2.1.1/connector/source/neo4j.md b/versioned_docs/version-2.1.1/connector/source/neo4j.md
new file mode 100644
index 00000000..534b6dee
--- /dev/null
+++ b/versioned_docs/version-2.1.1/connector/source/neo4j.md
@@ -0,0 +1,143 @@
+# Neo4j
+
+## Description
+
+Read data from Neo4j.
+
+Neo4j Connector for Apache Spark allows you to read data from Neo4j in 3 different ways: by node labels, by relationship name, and by direct Cypher query.
+
+Options whose required value is `yes*` mean that exactly one of `query` , `labels` or `relationship` must be specified.
+
+For detailed Neo4j configuration, please visit the [Neo4j doc](https://neo4j.com/docs/spark/current/reading/)
+
+:::tip
+
+Engine Supported and plugin name
+
+* [x] Spark: Neo4j
+* [ ] Flink
+
+:::
+
+## Options
+
+| name                                                                     | type   | required | default value |
+| -------------------------------------------------------------------------| ------ | -------- | ------------- |
+| [result_table_name](#result_table_name-string)                          | string | yes      | -             |
+| [authentication.type](#authentication.type-string)                       | string | no       | -             |
+| [authentication.basic.username](#authentication.basic.username-string)   | string | no       | -             |
+| [authentication.basic.password](#authentication.basic.password-string)   | string | no       | -             |
+| [url](#url-string)                                                       | string | yes      | -             |
+| [query](#query-string)                                                   | string | yes*     | -             |
+| [labels](#labels-string)                                                 | string | yes*     | -             |
+| [relationship](#relationship-string)                                     | string | yes*     | -             |
+| [schema.flatten.limit](#schema.flatten.limit-string)                     | string | no       | -             |
+| [schema.strategy](#schema.strategy-string)                               | string | no       | -             |
+| [pushdown.filters.enabled](#pushdown.filters.enabled-string)             | string | no       | -             |
+| [pushdown.columns.enabled](#pushdown.columns.enabled-string)             | string | no       | -             |
+| [partitions](#partitions-string)                                         | string | no       | -             |
+| [query.count](#query.count-string)                                       | string | no       | -             |
+| [relationship.nodes.map](#relationship.nodes.map-string)                 | string | no       | -             |
+| [relationship.source.labels](#relationship.source.labels-string)         | string | Yes      | -             |
+| [relationship.target.labels](#relationship.target.labels-string)         | string | Yes      | -             |
+
+### result_table_name [string]
+
+result table name
+
+### authentication.type [string]
+
+authentication type
+
+### authentication.basic.username [string]
+
+username
+
+### authentication.basic.password [string]
+
+password
+
+### url [string]
+
+The URL of the Neo4j instance, e.g. `bolt://localhost:7687`
+
+### query [string]
+
+Cypher query to read the data. You must specify one option from [query, labels OR relationship]
+
+### labels [string]
+
+List of node labels separated by `:` . The first label will be the primary label. You must specify one option from [query, labels OR relationship]
+
+### relationship [string]
+
+Name of a relationship. You must specify one option from [query, labels OR relationship]
+
+### schema.flatten.limit [string]
+
+Number of records to be used to create the schema (only used if APOC is not installed)
+
+### schema.strategy [string]
+
+Strategy used by the connector to compute the schema definition for the Dataset. Possible values are `string` and `sample` . When `string` , it coerces all the properties to String; otherwise it will try to sample the Neo4j dataset.
+
+### pushdown.filters.enabled [string]
+
+Enable or disable the Push Down Filters support
+
+### pushdown.columns.enabled [string]
+
+Enable or disable the Push Down Column support
+
+### partitions [string]
+
+This defines the parallelization level while pulling data from Neo4j.
+
+Note: more parallelization does not necessarily mean better performance, so please tune it wisely according to your Neo4j installation.
+
+### query.count [string]
+
+Query count, used only in combination with the `query` option. It is either a query that returns a `count` field like the following:
+
+MATCH (p:Person)-[r:BOUGHT]->(pr:Product)
+WHERE pr.name = 'An Awesome Product'
+RETURN count(p) AS count
+
+or a plain number that represents the number of records returned by `query` . Note that this value represents the volume of data pulled from Neo4j, so please use it carefully.
+
+### relationship.nodes.map [string]
+
+If `true` , source and target nodes are returned as `Map<String, String>` ; otherwise the properties are flattened and every single node property is returned as a column prefixed by `source` or `target`
+
+### relationship.source.labels [string]
+
+List of source node labels, separated by `:`
+
+### relationship.target.labels [string]
+
+List of target node labels, separated by `:`
+
+## Example
+
+```bash
+   Neo4j {
+      result_table_name = "test"
+      authentication.type = "basic"
+      authentication.basic.username = "test"
+      authentication.basic.password = "test"
+      url = "bolt://localhost:7687"
+      labels = "Person"
+      #query = "MATCH (n1)-[r]->(n2) RETURN r, n1, n2 "
+   }
+```
+
+> The query above returns a table like the following:
+
+| `<id>` | `<labels>` | name               | born |
+| ------ | ---------- | ------------------ | ---- |
+| 1      | [Person]   | Keanu Reeves       | 1964 |
+| 2      | [Person]   | Carrie-Anne Moss   | 1967 |
+| 3      | [Person]   | Laurence Fishburne | 1961 |
+| 4      | [Person]   | Hugo Weaving       | 1960 |
+| 5      | [Person]   | Andy Wachowski     | 1967 |
+| 6      | [Person]   | Lana Wachowski     | 1965 |
+| 7      | [Person]   | Joel Silver        | 1952 |
+| 8      | [Person]   | Emil Eifrem        | 1978 |
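+
+A sketch of the relationship read mode described above, reusing the labels and relationship from the `query.count` example; the values are assumptions for illustration:
+
+```bash
+   Neo4j {
+      result_table_name = "bought_relationships"
+      authentication.type = "basic"
+      authentication.basic.username = "test"
+      authentication.basic.password = "test"
+      url = "bolt://localhost:7687"
+      # read by relationship instead of labels or query
+      relationship = "BOUGHT"
+      relationship.source.labels = "Person"
+      relationship.target.labels = "Product"
+      relationship.nodes.map = "false"
+   }
+```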
diff --git a/versioned_docs/version-2.1.1/contribution/contribute-plugin.md b/versioned_docs/version-2.1.1/contribution/contribute-plugin.md
new file mode 100644
index 00000000..95a95906
--- /dev/null
+++ b/versioned_docs/version-2.1.1/contribution/contribute-plugin.md
@@ -0,0 +1,125 @@
+# Contribute Spark Plugins
+
+There are two parent modules for Spark plugins:
+
+1. [seatunnel-connectors-spark](https://github.com/apache/incubator-seatunnel/tree/dev/seatunnel-connectors/seatunnel-connectors-spark)
+2. [seatunnel-transforms-spark](https://github.com/apache/incubator-seatunnel/tree/dev/seatunnel-transforms/seatunnel-transforms-spark)
+
+Once you want to contribute a new plugin, you need to:
+
+## Create plugin module
+Create your plugin module under the corresponding parent plugin module.
+For example, if you want to add a new Spark connector plugin, you need to create a new module under the `seatunnel-connectors-spark` module.
+
+```xml
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <groupId>org.apache.seatunnel</groupId>
+        <artifactId>seatunnel-connectors-spark</artifactId>
+        <version>${revision}</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>seatunnel-connector-spark-hello</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.seatunnel</groupId>
+            <artifactId>seatunnel-api-spark</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+</project>
+```
+## Add plugin implementation
+
+You need to implement the `Connector` service provider interface. e.g. `BaseSource`/`BaseSink`.
+
+Conveniently, there are some abstract classes that can help you create your plugin easily. If you want to create a source connector,
+you can extend `SparkBatchSource`/`SparkStreamingSource`. If you want to create a sink connector, you can extend `SparkBatchSink`/`SparkStreamingSink`.
+
+The methods defined in `SparkBatchSource` are lifecycle methods that will be executed by the SeaTunnel engine.
+The execution order of the lifecycle methods is: `checkConfig` -> `prepare` -> `getData` -> `close`.
+
+```java
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+// imports of the SeaTunnel base classes (SparkBatchSource, SparkEnvironment, CheckResult) are omitted here
+
+public class Hello extends SparkBatchSource {
+    @Override
+    public Dataset<Row> getData(SparkEnvironment env) {
+        // do your logic here to generate data
+        Dataset<Row> dataset = null;
+        return dataset;
+    }
+
+    @Override
+    public CheckResult checkConfig() {
+        return super.checkConfig();
+    }
+
+    @Override
+    public void prepare(SparkEnvironment env) {
+        super.prepare(env);
+    }
+
+    @Override
+    public void close() throws Exception {
+        super.close();
+    }
+
+    @Override
+    public String getPluginName() {
+        return "hello";
+    }
+}
+```
+The `getPluginName` method is used to identify the plugin name.
+
+After you finish your implementation, you need to add a service provider to the `META-INF/services` file.
+The file name should be `org.apache.seatunnel.spark.BaseSparkSource` or `org.apache.seatunnel.spark.BaseSparkSink`, depending on the plugin type.
+The content of the file should be the fully qualified class name of your implementation.
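+
+For example, a hypothetical layout for the `hello` connector above might look like this (the package name is an assumption for illustration):
+
+```bash
+# File: src/main/resources/META-INF/services/org.apache.seatunnel.spark.BaseSparkSource
+# Content: the fully qualified class name of the Hello implementation
+org.apache.seatunnel.spark.source.Hello
+```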
+
+## Add plugin to the distribution
+
+You need to add your plugin to the `seatunnel-core-spark` module, then the plugin will be included in the distribution.
+```xml
+<dependency>
+    <groupId>org.apache.seatunnel</groupId>
+    <artifactId>seatunnel-connector-spark-hello</artifactId>
+    <version>${project.version}</version>
+</dependency>
+```
+
+# Contribute Flink Plugins
+
+The steps to contribute a Flink plugin are similar to the steps to contribute a Spark plugin.
+Different from Spark, you need to add your plugin to the Flink plugin modules.
+
+# Add e2e tests for your plugin
+
+Once you add a new plugin, it is recommended to add e2e tests for it. We have a `seatunnel-e2e` module to help you do this.
+
+For example, if you want to add an e2e test for your Flink connector, you can create a new test in the `seatunnel-flink-e2e` module
+and extend the `FlinkContainer` class in the test.
+
+```java
+// JUnit 4 and Testcontainers imports assumed by this test skeleton
+import java.io.IOException;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.testcontainers.containers.Container;
+
+public class HelloSourceIT extends FlinkContainer {
+
+    @Test
+    public void testHelloSource() throws IOException, InterruptedException {
+        Container.ExecResult execResult = executeSeaTunnelFlinkJob("/hello/hellosource.conf");
+        Assert.assertEquals(0, execResult.getExitCode());
+        // do some other assertion here
+    }
+}
+```
+Once your test class extends `FlinkContainer` , a Flink cluster is created automatically in Docker, and you just need to
+execute the `executeSeaTunnelFlinkJob` method with your SeaTunnel configuration file; it will submit the SeaTunnel job.
+
+In most cases, you need to start a third-party datasource in your test. For example, if you add a ClickHouse connector, you may need to
+start a ClickHouse database in your test. You can use `GenericContainer` to start a container.
+
+Note that your e2e test class should be named ending with `IT`. By default, tests whose class name ends with `IT` are not executed.
+You can add `-DskipIT=false` to execute the e2e tests; they rely on a Docker environment.
diff --git a/versioned_docs/version-2.1.1/contribution/new-license.md b/versioned_docs/version-2.1.1/contribution/new-license.md
new file mode 100644
index 00000000..44c80b34
--- /dev/null
+++ b/versioned_docs/version-2.1.1/contribution/new-license.md
@@ -0,0 +1,25 @@
+# How To Add New License
+
+If you add any new Jar binary package in your PR, you need to follow the steps below to declare its license:
+
+1. Declare it in `tools/dependencies/known-dependencies.txt`
+
+2. Add the corresponding license file under `seatunnel-dist/release-docs/licenses` ; if it is a standard Apache License, it does not need to be added
+
+3. Add the corresponding statement in `seatunnel-dist/release-docs/LICENSE`
+
+   ```bash
+   # At the same time, you can also use the script to assist the inspection.
+   # Because it only uses the Python native APIs and does not depend on any third-party libraries, it can run using the original Python environment.
+   # Please refer to the documentation if you do not have a Python env: https://www.python.org/downloads/
+   
+   # First, generate the seatunnel-dist/target/THIRD-PARTY.txt temporary file
+   ./mvnw license:aggregate-add-third-party -DskipTests -Dcheckstyle.skip
+   # Second, run the script to assist the inspection
+   python3 tools/dependencies/license.py seatunnel-dist/target/THIRD-PARTY.txt seatunnel-dist/release-docs/LICENSE true
+   ```
+
+4. Add the corresponding statement in `seatunnel-dist/release-docs/NOTICE`
+
+If you want to learn more about the license strategy, you can read the
+[License Notice](https://seatunnel.apache.org/community/submit_guide/license) in the submission guide.
diff --git a/versioned_docs/version-2.1.1/contribution/setup.md b/versioned_docs/version-2.1.1/contribution/setup.md
new file mode 100644
index 00000000..d7ec81ce
--- /dev/null
+++ b/versioned_docs/version-2.1.1/contribution/setup.md
@@ -0,0 +1,86 @@
+# Set Up Develop Environment
+
+In this section, we are going to show you how to set up your development environment for SeaTunnel, and then run a simple
+example in your JetBrains IntelliJ IDEA.
+
+> You can develop or test SeaTunnel code in any development environment that you like, but here we use
+> [JetBrains IDEA](https://www.jetbrains.com/idea/) as an example to walk you through the environment setup step by step.
+
+## Prepare
+
+Before we start talking about how to set up the environment, we need to do some preparation work. Make sure you already
+have installed the following software:
+
+* [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) installed.
+* [Java](https://www.java.com/en/download/) ( JDK8/JDK11 are supported by now) installed and `JAVA_HOME` set.
+* [Scala](https://www.scala-lang.org/download/2.11.12.html) (only scala 2.11.12 supported by now) installed.
+* [JetBrains IDEA](https://www.jetbrains.com/idea/) installed.
+
+## Set Up
+
+### Clone the Source Code
+
+First of all, you need to clone the SeaTunnel source code from [GitHub](https://github.com/apache/incubator-seatunnel).
+
+```shell
+git clone git@github.com:apache/incubator-seatunnel.git
+```
+
+### Install Subproject Locally
+
+After cloning the source code, you should run the `./mvnw` command to install the subprojects into your local Maven repository.
+Otherwise, the code cannot start correctly in JetBrains IntelliJ IDEA.
+
+```shell
+./mvnw install -Dmaven.test.skip
+```
+
+### Install JetBrains IDEA Scala Plugin
+
+Now, you can open your JetBrains IntelliJ IDEA and explore the source code, but to build Scala code in IDEA,
+you should also install JetBrains IntelliJ IDEA's [Scala plugin](https://plugins.jetbrains.com/plugin/1347-scala).
+See [install plugins for IDEA](https://www.jetbrains.com/help/idea/managing-plugins.html#install-plugins) if you want to.
+
+## Run Simple Example
+
+After all the above things are done, you have finished the environment setup and can run an example we provide out of the
+box. All examples are in the `seatunnel-examples` module; pick one you are interested in and [run or debug
+it in IDEA](https://www.jetbrains.com/help/idea/run-debug-configuration.html) as you wish.
+
+Here we use `seatunnel-examples/seatunnel-flink-examples/src/main/java/org/apache/seatunnel/example/flink/LocalFlinkExample.java`
+as an example; when you run it successfully, you will see output like the below:
+
+
+```log
++I[Gary, 1647423592505]
++I[Kid Xiong, 1647423593510]
++I[Ricky Huo, 1647423598537]
+...
+...
++I[Gary, 1647423597533]
+```
+
+## What's More
+
+All our examples use a simple source and sink to keep them less dependent and easy to run. You can change the example configuration
+in `resources/examples` . For example, you can change your configuration as below if you want to use PostgreSQL as the source and
+sink to the console.
+
+```conf
+env {
+  execution.parallelism = 1
+}
+
+source {
+  JdbcSource {
+    driver = org.postgresql.Driver
+    url = "jdbc:postgresql://host:port/database"
+    username = postgres
+    query = "select * from test"
+  }
+}
+
+sink {
+  ConsoleSink {}
+}
+```
diff --git a/versioned_docs/version-2.1.1/deployment.mdx b/versioned_docs/version-2.1.1/deployment.mdx
new file mode 100644
index 00000000..edcf4779
--- /dev/null
+++ b/versioned_docs/version-2.1.1/deployment.mdx
@@ -0,0 +1,124 @@
+# Deployment
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This section will show you how to submit your SeaTunnel application on all kinds of cluster engines. If you have not installed
+SeaTunnel yet, please see the [quick start](/docs/category/start) to learn how to prepare and change the SeaTunnel configuration first.
+
+## Deployment on All Kinds of Engines
+
+### Local Mode (Spark Only)
+
+Local mode only supports the Spark engine for now.
+
+```shell
+./bin/start-seatunnel-spark.sh \
+--master local[4] \
+--deploy-mode client \
+--config ./config/application.conf
+```
+
+### Standalone Cluster
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```shell
+# client mode
+./bin/start-seatunnel-spark.sh \
+--master spark://ip:7077 \
+--deploy-mode client \
+--config ./config/application.conf
+
+# cluster mode
+./bin/start-seatunnel-spark.sh \
+--master spark://ip:7077 \
+--deploy-mode cluster \
+--config ./config/application.conf
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```shell
+bin/start-seatunnel-flink.sh \
+--config config-path
+
+# -p 2 specifies that the parallelism of flink job is 2. You can also specify more parameters, use flink run -h to view
+bin/start-seatunnel-flink.sh \
+-p 2 \
+--config config-path
+```
+
+</TabItem>
+</Tabs>
+
+### Yarn Cluster
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```shell
+# client mode
+./bin/start-seatunnel-spark.sh \
+--master yarn \
+--deploy-mode client \
+--config ./config/application.conf
+
+# cluster mode
+./bin/start-seatunnel-spark.sh \
+--master yarn \
+--deploy-mode cluster \
+--config ./config/application.conf
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```shell
+bin/start-seatunnel-flink.sh \
+-m yarn-cluster \
+--config config-path
+
+# -ynm seatunnel specifies the name displayed in the yarn webUI as seatunnel, you can also specify more parameters, use flink run -h to view
+bin/start-seatunnel-flink.sh \
+-m yarn-cluster \
+-ynm seatunnel \
+--config config-path
+```
+
+</TabItem>
+</Tabs>
+
+### Mesos Cluster (Spark Only)
+
+Mesos cluster deployment only supports the Spark engine for now.
+
+```shell
+# cluster mode
+./bin/start-seatunnel-spark.sh \
+--master mesos://ip:7077 \
+--deploy-mode cluster \
+--config ./config/application.conf
+```
+
+## Run Your Engine at Scale
+
+(This section is about the engines rather than SeaTunnel itself; it is background knowledge for users who are not familiar with
+engine cluster types.) Both Spark and Flink can run on different kinds of clusters and at any scale. This guide only shows the basic
+usage of SeaTunnel, which is built on top of the Spark or Flink engine; if you want to scale your engine cluster, see the
+[Spark](https://spark.apache.org/docs/latest/running-on-kubernetes.html)
+or [Flink](https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/resource-providers/native_kubernetes/) documentation.
diff --git a/versioned_docs/version-2.1.1/faq.md b/versioned_docs/version-2.1.1/faq.md
new file mode 100644
index 00000000..8712e72d
--- /dev/null
+++ b/versioned_docs/version-2.1.1/faq.md
@@ -0,0 +1,367 @@
+# FAQ
+
+## Why should I install a computing engine like Spark or Flink?
+
+<!-- We should add the reason -->
+TODO
+
+## I have a question, and I cannot solve it by myself
+
+I encounter a problem when using SeaTunnel and I cannot solve it by myself. What should I do? First search the [Issue list](https://github.com/apache/incubator-seatunnel/issues) or [mailing list](https://lists.apache.org/list.html?dev@seatunnel.apache.org) to see if someone has already asked the same question and got an answer. If you still cannot find the answer, you can contact community members for help in [these ways](https://github.com/apache/incubator-seatunnel#contact-us) .
+
+## How to declare a variable
+
+Do you want to know how to declare a variable in SeaTunnel's configuration and then replace its value dynamically at runtime?
+
+Since `v1.2.4` , SeaTunnel supports variable substitution in the configuration. This feature is often used for timed or non-timed offline processing to replace variables such as time and date. The usage is as follows:
+
+Configure the variable name in the configuration. Here is an example of the sql transform (actually, anywhere in the configuration file, the value in `key = value` can use variable substitution):
+
+```
+...
+transform {
+  sql {
+    sql = "select * from user_view where city ='"${city}"' and dt = '"${date}"'"
+  }
+}
+...
+```
+
+Taking Spark Local mode as an example, the startup command is as follows:
+
+```bash
+./bin/start-seatunnel-spark.sh \
+-c ./config/your_app.conf \
+-e client \
+-m local[2] \
+-i city=shanghai \
+-i date=20190319
+```
+
+You can use the parameter `-i` or `--variable` followed by `key=value` to specify the value of the variable, where the key needs to be the same as the variable name in the configuration.
+
+## How to write a configuration item into multi-line text in the configuration file?
+
+When a configured text is very long and you want to wrap it, you can use three double quotes to indicate it:
+
+```
+var = """
+ whatever you want
+"""
+```
+
+## How to implement variable substitution for multi-line text?
+
+It is a little troublesome to do variable substitution in multi-line text, because the variable cannot be included in three double quotation marks:
+
+```
+var = """
+your string 1
+"""${you_var}""" your string 2"""
+```
+
+refer to: [lightbend/config#456](https://github.com/lightbend/config/issues/456)
+
+## Is SeaTunnel supported in Azkaban, Oozie, DolphinScheduler?
+
+Of course, please see the screenshot below:
+
+![workflow.png](/image_en/workflow.png)
+
+![azkaban.png](/image_en/azkaban.png)
+
+## Does SeaTunnel have a case of configuring multiple sources, such as configuring Elasticsearch and HDFS in `source` at the same time?
+
+```
+env {
+	...
+}
+
+source {
+  hdfs { ... }	
+  elasticsearch { ... }
+  jdbc {...}
+}
+
+transform {
+	sql {
+	 sql = """
+	 	select .... from hdfs_table 
+	 	join es_table 
+	 	on hdfs_table.uid = es_table.uid where ..."""
+	}
+}
+
+sink {
+	elasticsearch { ... }
+}
+```
+
+## Are there any HBase plugins?
+
+There is an HBase input plugin; download it from here: https://github.com/garyelephant/waterdrop-input-hbase
+
+## How to use SeaTunnel to write data to Hive?
+
+```
+env {
+  spark.sql.catalogImplementation = "hive"
+  spark.hadoop.hive.exec.dynamic.partition = "true"
+  spark.hadoop.hive.exec.dynamic.partition.mode = "nonstrict"
+}
+
+source {
+  sql = "insert into ..."
+}
+
+sink {
+    // The data has been written to hive through the sql source, this is just a placeholder, it does not actually work.
+    stdout {
+        limit = 1
+    }
+}
+```
+
+In addition, SeaTunnel has implemented the `Hive` output plugin since `1.5.7` in the `1.x` branch; in the `2.x` branch, the Hive plugin of the Spark engine has been supported since version `2.0.5` : https://github.com/apache/incubator-seatunnel/issues/910.
+
+## How does SeaTunnel write multiple instances of ClickHouse to achieve load balancing?
+
+1. Write distributed tables directly (not recommended)
+
+2. By adding a proxy or domain name (DNS) in front of multiple instances of ClickHouse:
+
+   ```
+   {
+       output {
+           clickhouse {
+               host = "ck-proxy.xx.xx:8123"
+               # Local table
+               table = "table_name"
+           }
+       }
+   }
+   ```
+
+3. Configure multiple instances in the configuration:
+
+   ```
+   {
+       output {
+           clickhouse {
+               host = "ck1:8123,ck2:8123,ck3:8123"
+               # Local table
+               table = "table_name"
+           }
+       }
+   }
+   ```
+
+4. Use cluster mode:
+
+   ```
+   {
+       output {
+           clickhouse {
+               # Configure only one host
+               host = "ck1:8123"
+               cluster = "clickhouse_cluster_name"
+               # Local table
+               table = "table_name"
+           }
+       }
+   }
+   ```
+
+## How to solve OOM when SeaTunnel consumes Kafka?
+
+In most cases, OOM is caused by the fact that there is no rate limit for consumption. The solution is as follows:
+
+Regarding the current limit of Spark consumption of Kafka:
+
+1. Suppose the number of partitions of Kafka `Topic 1` you consume with KafkaStream = N.
+
+2. Assuming that the production speed of the message producer (Producer) of `Topic 1` is K messages/second, and that messages are written to the partitions at a uniform speed.
+
+3. Suppose that, after testing, the processing capacity of a Spark executor per core is M messages/second.
+
+The following conclusions can be drawn:
+
+1. If you want Spark's consumption of `Topic 1` to keep up with its production speed, then you need `spark.executor.cores` * `spark.executor.instances` >= K / M
+
+2. When data is delayed, if you do not want the consumption speed to be so fast that it causes Spark executor OOM, then you need to configure `spark.streaming.kafka.maxRatePerPartition` <= (`spark.executor.cores` * `spark.executor.instances`) * M / N
+
+3. In general, both M and N are fixed, and from 2 we can conclude: the size of `spark.streaming.kafka.maxRatePerPartition` is positively correlated with the size of `spark.executor.cores` * `spark.executor.instances` , so you can increase `maxRatePerPartition` while increasing the resources to speed up consumption, as sketched below.
+
+![kafka](/image_en/kafka.png)
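+
+A minimal `env` sketch under the assumptions above (K, M and N must be measured in your environment; the numbers here are purely illustrative):
+
+```bash
+env {
+  # resources: cores * instances should be >= K / M
+  spark.executor.cores = 2
+  spark.executor.instances = 4
+  # per-partition rate limit: should be <= (cores * instances) * M / N
+  spark.streaming.kafka.maxRatePerPartition = 2000
+}
+```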
+
+## How to solve the Error `Exception in thread "main" java.lang.NoSuchFieldError: INSTANCE`?
+
+The reason is that the `httpclient.jar` bundled with the CDH version of Spark is older than the httpclient 4.5.2 that the ClickHouse JDBC driver is based on, so the package versions conflict. The solution is to replace the jar that comes with CDH with `httpclient-4.5.2` .
+
+## The default JDK of my Spark cluster is JDK7. After I install JDK8, how can I make SeaTunnel start with JDK8?
+
+In SeaTunnel's config file, specify the following configuration:
+
+```shell
+spark {
+ ...
+ spark.executorEnv.JAVA_HOME="/your/java_8_home/directory"
+ spark.yarn.appMasterEnv.JAVA_HOME="/your/java_8_home/directory"
+ ...
+}
+```
+
+## How to specify a different JDK version for SeaTunnel on Yarn?
+
+For example, if you want to set the JDK version to JDK8, there are two cases:
+
+- The Yarn cluster has deployed JDK8, but the default JDK is not JDK8. You only need to add two configurations to the SeaTunnel config file:
+
+    ```
+    env {
+     ...
+     spark.executorEnv.JAVA_HOME="/your/java_8_home/directory"
+     spark.yarn.appMasterEnv.JAVA_HOME="/your/java_8_home/directory"
+     ...
+    }
+    ```
+
+- The Yarn cluster has not deployed JDK8. In this case, you need to attach JDK8 when starting SeaTunnel. For detailed operations, see the link below:
+  https://www.cnblogs.com/jasondan/p/spark-specific-jdk-version.html
+
+## What should I do if OOM always appears when running SeaTunnel in Spark local[*] mode?
+
+If you run in local mode, you need to modify the `start-seatunnel.sh` startup script, adding the parameter `--driver-memory 4g` after `spark-submit` . Under normal circumstances, local mode is not used in the production environment, so this parameter generally does not need to be set when running on Yarn. See [Application Properties](https://spark.apache.org/docs/latest/configuration.html#application-properties) for details.
+
+## Where can the self-written plugins or third-party jdbc.jar be placed to be loaded by SeaTunnel?
+
+Place the Jar package under the specified structure of the plugins directory:
+
+```bash
+cd SeaTunnel
+mkdir -p plugins/my_plugins/lib
+cp third-part.jar plugins/my_plugins/lib
+```
+
+`my_plugins` can be any string.
+
+## How to configure logging related parameters in SeaTunnel-v1(Spark)?
+
+There are 3 ways to configure Logging related parameters (such as Log Level):
+
+- [Not recommended] Change the default `$SPARK_HOME/conf/log4j.properties`
+   - This will affect all programs submitted via `$SPARK_HOME/bin/spark-submit`
+- [Not recommended] Modify logging related parameters directly in the Spark code of SeaTunnel
+   - This is equivalent to writing dead, and each change needs to be recompiled
+- [Recommended] Use the following methods to change the logging configuration in the SeaTunnel configuration file(It only takes effect after SeaTunnel >= 1.5.5 ):
+
+    ```
+    env {
+        spark.driver.extraJavaOptions = "-Dlog4j.configuration=file:<file path>/log4j.properties"
+        spark.executor.extraJavaOptions = "-Dlog4j.configuration=file:<file path>/log4j.properties"
+    }
+    source {
+      ...
+    }
+    transform {
+     ...
+    }
+    sink {
+      ...
+    }
+    ```
+
+The contents of the log4j configuration file for reference are as follows:
+
+```
+$ cat log4j.properties
+log4j.rootLogger=ERROR, console
+
+# set the log level for these components
+log4j.logger.org=ERROR
+log4j.logger.org.apache.spark=ERROR
+log4j.logger.org.spark-project=ERROR
+log4j.logger.org.apache.hadoop=ERROR
+log4j.logger.io.netty=ERROR
+log4j.logger.org.apache.zookeeper=ERROR
+
+# add a ConsoleAppender to the logger stdout to write to the console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+# use a simple message format
+log4j.appender.console.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+```
+
+## How to configure logging related parameters in SeaTunnel-v2 (Spark, Flink)?
+
+Currently, it cannot be set directly. The user needs to modify the SeaTunnel startup script. The relevant parameters are specified in the task submission command. For specific parameters, please refer to the official documents:
+
+- Spark official documentation: http://spark.apache.org/docs/latest/configuration.html#configuring-logging
+- Flink official documentation: https://ci.apache.org/projects/flink/flink-docs-stable/monitoring/logging.html
+
+Reference:
+
+https://stackoverflow.com/questions/27781187/how-to-stop-info-messages-displaying-on-spark-console
+
+http://spark.apache.org/docs/latest/configuration.html#configuring-logging
+
+https://medium.com/@iacomini.riccardo/spark-logging-configuration-in-yarn-faf5ba5fdb01
+
+## Error when writing to ClickHouse: ClassCastException
+
+In SeaTunnel, the data type will not be converted automatically. After the Input reads the data, each field carries its corresponding schema. When writing to ClickHouse, the field types need to match strictly, and any mismatch needs to be converted.
+
+Data conversion can be achieved through the following two plugins:
+
+1. Filter Convert plugin
+2. Filter Sql plugin
+
+Detailed data type conversion reference: [ClickHouse Data Type Check List](https://interestinglab.github.io/seatunnel-docs/#/en/configuration/output-plugins/Clickhouse?id=clickhouse-data-type-check-list)
+
+Refer to issue:[#488](https://github.com/apache/incubator-seatunnel/issues/488) [#382](https://github.com/apache/incubator-seatunnel/issues/382)
+
+## How does SeaTunnel access kerberos-authenticated HDFS, YARN, Hive and other resources?
+
+Please refer to: [#590](https://github.com/apache/incubator-seatunnel/issues/590)
+
+## How to troubleshoot NoClassDefFoundError, ClassNotFoundException and other issues?
+
+There is a high probability that multiple different versions of the corresponding Jar package classes are loaded in the Java classpath and conflict in load order, rather than the Jar actually being missing. Please modify the SeaTunnel startup command, add the following parameters to the `spark-submit` invocation, and debug in detail through the output log.
+
+```
+spark-submit --verbose
+    ...
+   --conf 'spark.driver.extraJavaOptions=-verbose:class'
+   --conf 'spark.executor.extraJavaOptions=-verbose:class'
+    ...
+```
+
+## How to use SeaTunnel to synchronize data across HDFS clusters?
+
+Just configure `hdfs-site.xml` properly; refer to: https://www.cnblogs.com/suanec/p/7828139.html
+
+There is an article on how to modify the Spark code to complete the configuration (SeaTunnel does not need to do this): https://www.jianshu.com/p/3e84c4c97610
+
+## I want to learn the source code of SeaTunnel, where should I start?
+
+SeaTunnel has a completely abstract and structured code implementation, and many people have chosen SeaTunnel as a way to learn Spark. You can learn the source code from the main program entry: [Seatunnel.java](https://github.com/apache/incubator-seatunnel/blob/72b57b22688f17376fe7e5cf522b4bdd3f62cce0/seatunnel-core/seatunnel-core-base/src/main/java/org/apache/seatunnel/Seatunnel.java)
+
+## When SeaTunnel developers develop their own plugins, do they need to understand the SeaTunnel code? Should this code be integrated into the SeaTunnel project?
+
+The plugin you develop has nothing to do with the SeaTunnel project, and you do not need to integrate your plugin code into it.
+
+The plugin can be completely independent of the SeaTunnel project, in which you can use Java,
+Scala, Maven, sbt, Gradle, whatever you want. This is also the way we recommend developers to develop plugins.
+
+## When I import the project, the compiler throws the exception "class not found `org.apache.seatunnel.shade.com.typesafe.config.Config`"
+
+Run `mvn install` first.
+
+This is because in the `seatunnel-config/seatunnel-config-base` subproject, the package `com.typesafe.config` has been relocated to `org.apache.seatunnel.shade.com.typesafe.config` and installed into the local Maven repository in the `seatunnel-config/seatunnel-config-shade` subproject.
diff --git a/versioned_docs/version-2.1.1/intro/about.md b/versioned_docs/version-2.1.1/intro/about.md
new file mode 100644
index 00000000..6a528a30
--- /dev/null
+++ b/versioned_docs/version-2.1.1/intro/about.md
@@ -0,0 +1,72 @@
+---
+sidebar_position: 1
+---
+
+# About SeaTunnel
+
+<img src="https://seatunnel.apache.org/image/logo.png" alt="seatunnel logo" width="200px" height="200px" align="right" />
+
+[![Slack](https://img.shields.io/badge/slack-%23seatunnel-4f8eba?logo=slack)](https://join.slack.com/t/apacheseatunnel/shared_invite/zt-123jmewxe-RjB_DW3M3gV~xL91pZ0oVQ)
+[![Twitter Follow](https://img.shields.io/twitter/follow/ASFSeaTunnel.svg?label=Follow&logo=twitter)](https://twitter.com/ASFSeaTunnel)
+
+SeaTunnel is a very easy-to-use, ultra-high-performance distributed data integration platform that supports real-time
+synchronization of massive data. It can synchronize tens of billions of records stably and efficiently every day, and has
+been used in production by nearly 100 companies.
+
+## Use Scenarios
+
+- Mass data synchronization
+- Mass data integration
+- ETL with massive data
+- Mass data aggregation
+- Multi-source data processing
+
+## Features
+
+- Easy to use, flexible configuration, low code development
+- Real-time streaming
+- Offline multi-source data analysis
+- High-performance, massive data processing capabilities
+- Modular and plug-in mechanism, easy to extend
+- Support data processing and aggregation by SQL
+- Support Spark structured streaming
+- Support Spark 2.x
+
+## Workflow
+
+![seatunnel-workflow.svg](/image_en/seatunnel-workflow.svg)
+
+```text
+Source[Data Source Input] -> Transform[Data Processing] -> Sink[Result Output]
+```
+
+The data processing pipeline is made up of multiple filters to meet a variety of data processing needs. If you are
+accustomed to SQL, you can also build a data processing pipeline directly with SQL, which is simple and efficient.
+Currently, the list of filters supported by SeaTunnel is still being expanded. Furthermore, you can develop your own data
+processing plugins, because the whole system is easy to extend.
+
+## Connector
+
+- Input plugins: Fake, File, Hdfs, Kafka, Druid, InfluxDB, S3, Socket, and self-developed Input plugins
+
+- Filter plugins: Add, Checksum, Convert, Date, Drop, Grok, Json, Kv, Lowercase, Remove, Rename, Repartition, Replace,
+  Sample, Split, Sql, Table, Truncate, Uppercase, Uuid, and self-developed Filter plugins
+
+- Output plugins: Elasticsearch, File, Hdfs, Jdbc, Kafka, Druid, InfluxDB, Mysql, S3, Stdout, and self-developed Output plugins
+
+## Who Uses SeaTunnel
+
+SeaTunnel has many users; you can find more information about them on the [users](https://seatunnel.apache.org/user) page.
+
+## Landscapes
+
+<p align="center">
+<br/><br/>
+<img src="https://landscape.cncf.io//image_en/left-logo.svg" width="150" alt=""/>&nbsp;&nbsp;<img src="https://landscape.cncf.io//image_en/right-logo.svg" width="200" alt=""/>
+<br/><br/>
+SeaTunnel enriches the <a href="https://landscape.cncf.io/landscape=observability-and-analysis&license=apache-license-2-0">CNCF CLOUD NATIVE Landscape.</a >
+</p >
+
+## What's More
+
+You can see [Quick Start](/docs/category/start) for the next step.
diff --git a/versioned_docs/version-2.1.1/intro/history.md b/versioned_docs/version-2.1.1/intro/history.md
new file mode 100644
index 00000000..1d62ea69
--- /dev/null
+++ b/versioned_docs/version-2.1.1/intro/history.md
@@ -0,0 +1,15 @@
+---
+sidebar_position: 3
+---
+
+# History of SeaTunnel
+
+SeaTunnel was formerly named WaterDrop.
+
+## Rename to SeaTunnel
+
+The project was renamed to SeaTunnel on Oct 12th, 2021.
+
+## Entering the Apache Software Foundation’s Incubator
+
+SeaTunnel joined the Apache Software Foundation’s Incubator program on Dec 9th, 2021.
diff --git a/versioned_docs/version-2.1.1/intro/why.md b/versioned_docs/version-2.1.1/intro/why.md
new file mode 100644
index 00000000..d6de2e21
--- /dev/null
+++ b/versioned_docs/version-2.1.1/intro/why.md
@@ -0,0 +1,13 @@
+---
+sidebar_position: 2
+---
+
+# Why SeaTunnel
+
+SeaTunnel does its best to solve the problems that may be encountered in the synchronization of massive data:
+
+- Data loss and duplication
+- Task accumulation and delay
+- Low throughput
+- A long cycle before it can be applied in the production environment
+- Lack of monitoring of the application's running status
diff --git a/versioned_docs/version-2.1.1/introduction/about.md b/versioned_docs/version-2.1.1/introduction/about.md
new file mode 100644
index 00000000..6a528a30
--- /dev/null
+++ b/versioned_docs/version-2.1.1/introduction/about.md
@@ -0,0 +1,72 @@
+---
+sidebar_position: 1
+---
+
+# About SeaTunnel
+
+<img src="https://seatunnel.apache.org/image/logo.png" alt="seatunnel logo" width="200px" height="200px" align="right" />
+
+[![Slack](https://img.shields.io/badge/slack-%23seatunnel-4f8eba?logo=slack)](https://join.slack.com/t/apacheseatunnel/shared_invite/zt-123jmewxe-RjB_DW3M3gV~xL91pZ0oVQ)
+[![Twitter Follow](https://img.shields.io/twitter/follow/ASFSeaTunnel.svg?label=Follow&logo=twitter)](https://twitter.com/ASFSeaTunnel)
+
+SeaTunnel is a very easy-to-use, ultra-high-performance distributed data integration platform that supports real-time
+synchronization of massive data. It can synchronize tens of billions of records stably and efficiently every day, and has
+been used in production by nearly 100 companies.
+
+## Use Scenarios
+
+- Mass data synchronization
+- Mass data integration
+- ETL with massive data
+- Mass data aggregation
+- Multi-source data processing
+
+## Features
+
+- Easy to use, flexible configuration, low code development
+- Real-time streaming
+- Offline multi-source data analysis
+- High-performance, massive data processing capabilities
+- Modular and plug-in mechanism, easy to extend
+- Support data processing and aggregation by SQL
+- Support Spark structured streaming
+- Support Spark 2.x
+
+## Workflow
+
+![seatunnel-workflow.svg](/image_en/seatunnel-workflow.svg)
+
+```text
+Source[Data Source Input] -> Transform[Data Processing] -> Sink[Result Output]
+```
+
+The data processing pipeline is made up of multiple filters to meet a variety of data processing needs. If you are
+accustomed to SQL, you can also build a data processing pipeline directly with SQL, which is simple and efficient.
+Currently, the list of filters supported by SeaTunnel is still being expanded. Furthermore, you can develop your own data
+processing plugins, because the whole system is easy to extend.
+
+## Connector
+
+- Input plugins: Fake, File, Hdfs, Kafka, Druid, InfluxDB, S3, Socket, and self-developed Input plugins
+
+- Filter plugins: Add, Checksum, Convert, Date, Drop, Grok, Json, Kv, Lowercase, Remove, Rename, Repartition, Replace,
+  Sample, Split, Sql, Table, Truncate, Uppercase, Uuid, and self-developed Filter plugins
+
+- Output plugins: Elasticsearch, File, Hdfs, Jdbc, Kafka, Druid, InfluxDB, Mysql, S3, Stdout, and self-developed Output plugins
+
+## Who Uses SeaTunnel
+
+SeaTunnel has many users; you can find more information about them on the [users](https://seatunnel.apache.org/user) page.
+
+## Landscapes
+
+<p align="center">
+<br/><br/>
+<img src="https://landscape.cncf.io//image_en/left-logo.svg" width="150" alt=""/>&nbsp;&nbsp;<img src="https://landscape.cncf.io//image_en/right-logo.svg" width="200" alt=""/>
+<br/><br/>
+SeaTunnel enriches the <a href="https://landscape.cncf.io/landscape=observability-and-analysis&license=apache-license-2-0">CNCF CLOUD NATIVE Landscape.</a >
+</p >
+
+## What's More
+
+You can see [Quick Start](/docs/category/start) for the next step.
diff --git a/versioned_docs/version-2.1.1/introduction/history.md b/versioned_docs/version-2.1.1/introduction/history.md
new file mode 100644
index 00000000..1d62ea69
--- /dev/null
+++ b/versioned_docs/version-2.1.1/introduction/history.md
@@ -0,0 +1,15 @@
+---
+sidebar_position: 3
+---
+
+# History of SeaTunnel
+
+SeaTunnel was formerly named WaterDrop.
+
+## Rename to SeaTunnel
+
+The project was renamed to SeaTunnel on Oct 12th, 2021.
+
+## Entering the Apache Software Foundation’s Incubator
+
+SeaTunnel joined the Apache Software Foundation’s Incubator program on Dec 9th, 2021.
diff --git a/versioned_docs/version-2.1.1/introduction/why.md b/versioned_docs/version-2.1.1/introduction/why.md
new file mode 100644
index 00000000..d6de2e21
--- /dev/null
+++ b/versioned_docs/version-2.1.1/introduction/why.md
@@ -0,0 +1,13 @@
+---
+sidebar_position: 2
+---
+
+# Why SeaTunnel
+
+SeaTunnel does its best to solve the problems that may be encountered in the synchronization of massive data:
+
+- Data loss and duplication
+- Task accumulation and delay
+- Low throughput
+- A long cycle before it can be applied in the production environment
+- Lack of monitoring of the application's running status
diff --git a/versioned_docs/version-2.1.1/start/docker.md b/versioned_docs/version-2.1.1/start/docker.md
new file mode 100644
index 00000000..2553b997
--- /dev/null
+++ b/versioned_docs/version-2.1.1/start/docker.md
@@ -0,0 +1,8 @@
+---
+sidebar_position: 3
+---
+
+# Set Up with Docker
+
+<!-- TODO -->
+WIP
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/start/kubernetes.mdx b/versioned_docs/version-2.1.1/start/kubernetes.mdx
new file mode 100644
index 00000000..bc867d1a
--- /dev/null
+++ b/versioned_docs/version-2.1.1/start/kubernetes.mdx
@@ -0,0 +1,268 @@
+---
+sidebar_position: 4
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Set Up with Kubernetes
+
+This section provides a quick guide to using SeaTunnel with Kubernetes. 
+
+## Prerequisites
+
+We assume that you have local installations of the following:
+
+- [docker](https://docs.docker.com/)
+- [kubernetes](https://kubernetes.io/)
+- [helm](https://helm.sh/docs/intro/quickstart/)
+
+So that the `kubectl` and `helm` commands are available on your local system.
+
+For Kubernetes, [minikube](https://minikube.sigs.k8s.io/docs/start/) is our choice; at the time of writing we are using version v1.23.3. You can start a cluster with the following command:
+
+```bash
+minikube start --kubernetes-version=v1.23.3
+```
+
+## Installation
+
+### SeaTunnel docker image
+
+To run the image with SeaTunnel, first create a `Dockerfile`:
+
+<Tabs
+  groupId="engine-type"
+  defaultValue="flink"
+  values={[
+    {label: 'Flink', value: 'flink'},
+  ]}>
+<TabItem value="flink">
+
+```Dockerfile
+FROM flink:1.13
+
+ENV SEATUNNEL_VERSION="2.1.0"
+
+RUN wget https://archive.apache.org/dist/incubator/seatunnel/${SEATUNNEL_VERSION}/apache-seatunnel-incubating-${SEATUNNEL_VERSION}-bin.tar.gz
+RUN tar -xzvf apache-seatunnel-incubating-${SEATUNNEL_VERSION}-bin.tar.gz
+
+RUN mkdir -p $FLINK_HOME/usrlib
+RUN cp apache-seatunnel-incubating-${SEATUNNEL_VERSION}/lib/seatunnel-core-flink.jar $FLINK_HOME/usrlib/seatunnel-core-flink.jar
+
+RUN rm -fr apache-seatunnel-incubating-${SEATUNNEL_VERSION}*
+```
+
+Then run the following commands to build the image:
+```bash
+docker build -t seatunnel:2.1.0-flink-1.13 -f Dockerfile .
+```
+Image `seatunnel:2.1.0-flink-1.13` needs to be present on the host (minikube) so that the deployment can take place.
+
+Load the image into minikube via:
+```bash
+minikube image load seatunnel:2.1.0-flink-1.13
+```
+
+</TabItem>
+</Tabs>
+
+### Deploying the operator
+
+<Tabs
+  groupId="engine-type"
+  defaultValue="flink"
+  values={[
+    {label: 'Flink', value: 'flink'},
+  ]}>
+<TabItem value="flink">
+
+The steps below provide a quick walk-through on setting up the Flink Kubernetes Operator. 
+
+Install the certificate manager on your Kubernetes cluster to enable adding the webhook component (only needed once per Kubernetes cluster):
+
+```bash
+kubectl create -f https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.yaml
+```
+Now you can deploy the latest stable Flink Kubernetes Operator version using the included Helm chart:
+
+```bash
+
+helm repo add flink-operator-repo https://downloads.apache.org/flink/flink-kubernetes-operator-0.1.0/
+
+helm install flink-kubernetes-operator flink-operator-repo/flink-kubernetes-operator
+```
+
+You may verify your installation via `kubectl`:
+
+```bash
+kubectl get pods
+NAME                                                   READY   STATUS    RESTARTS      AGE
+flink-kubernetes-operator-5f466b8549-mgchb             1/1     Running   3 (23h ago)   16d
+
+```
+
+</TabItem>
+</Tabs>
+
+## Run SeaTunnel Application
+
+**Run Application**: SeaTunnel already provides out-of-the-box [configurations](https://github.com/apache/incubator-seatunnel/tree/dev/config).
+
+<Tabs
+  groupId="engine-type"
+  defaultValue="flink"
+  values={[
+    {label: 'Flink', value: 'flink'},
+  ]}>
+<TabItem value="flink">
+
+In this guide we are going to use [flink.streaming.conf](https://github.com/apache/incubator-seatunnel/blob/dev/config/flink.streaming.conf.template):
+ 
+ ```conf
+env {
+  execution.parallelism = 1
+}
+
+source {
+    FakeSourceStream {
+      result_table_name = "fake"
+      field_name = "name,age"
+    }
+}
+
+transform {
+    sql {
+      sql = "select name,age from fake"
+    }
+}
+
+sink {
+  ConsoleSink {}
+}
+ ```
+
+This configuration needs to be present when we deploy the application (SeaTunnel) to the Flink cluster (on Kubernetes); we also need to configure a Pod to use a PersistentVolume for storage.
+- Create `/mnt/data` on your Node. Open a shell to the single Node in your cluster. How you open a shell depends on how you set up your cluster. For example, in our case we are using Minikube, so you can open a shell to your Node by entering `minikube ssh`.
+  In your shell on that Node, create a `/mnt/data` directory:
+```bash
+minikube ssh
+
+# This assumes that your Node uses "sudo" to run commands
+# as the superuser
+sudo mkdir /mnt/data
+```
+- Copy the application (SeaTunnel) configuration file to your Node:
+```bash
+minikube cp flink.streaming.conf /mnt/data/flink.streaming.conf
+```
+
+Once the Flink Kubernetes Operator is running, as seen in the previous steps, you are ready to submit a Flink (SeaTunnel) job:
+- Create `seatunnel-flink.yaml` FlinkDeployment manifest:
+```yaml
+apiVersion: flink.apache.org/v1alpha1
+kind: FlinkDeployment
+metadata:
+  namespace: default
+  name: seatunnel-flink-streaming-example
+spec:
+  image: seatunnel:2.1.0-flink-1.13
+  flinkVersion: v1_14
+  flinkConfiguration:
+    taskmanager.numberOfTaskSlots: "2"
+  serviceAccount: flink
+  jobManager:
+    replicas: 1
+    resource:
+      memory: "2048m"
+      cpu: 1
+  taskManager:
+    resource:
+      memory: "2048m"
+      cpu: 2
+  podTemplate:  
+    spec:
+      containers:
+        - name: flink-main-container
+          volumeMounts:
+            - mountPath: /data
+              name: config-volume
+      volumes:
+        - name: config-volume
+          hostPath:
+            path: "/mnt/data"
+            type: Directory
+
+  job:
+    jarURI: local:///opt/flink/usrlib/seatunnel-core-flink.jar
+    entryClass: org.apache.seatunnel.SeatunnelFlink
+    args: ["--config", "/data/flink.streaming.conf"]
+    parallelism: 2
+    upgradeMode: stateless
+
+```
+- Run the example application:
+```bash
+kubectl apply -f seatunnel-flink.yaml
+```
+</TabItem>
+</Tabs>
+
+**See The Output**
+
+<Tabs
+  groupId="engine-type"
+  defaultValue="flink"
+  values={[
+    {label: 'Flink', value: 'flink'},
+  ]}>
+<TabItem value="flink">
+
+You may follow the logs of your job; after a successful startup (which can take on the order of a minute in a fresh environment, only seconds afterwards) you can run:
+
+```bash
+kubectl logs -f deploy/seatunnel-flink-streaming-example
+```
+
+To expose the Flink Dashboard you may add a port-forward rule:
+```bash
+kubectl port-forward svc/seatunnel-flink-streaming-example-rest 8081
+```
+Now the Flink Dashboard is accessible at [localhost:8081](http://localhost:8081).
+
+Or launch `minikube dashboard` for a web-based Kubernetes user interface.
+
+The content printed in the TaskManager Stdout log:
+```bash
+kubectl logs \
+-l 'app in (seatunnel-flink-streaming-example), component in (taskmanager)' \
+--tail=-1 \
+-f
+```
+looks like the below (your content may be different since we use `FakeSourceStream` to automatically generate random stream data):
+
+```shell
++I[Kid Xiong, 1650316786086]
++I[Ricky Huo, 1650316787089]
++I[Ricky Huo, 1650316788089]
++I[Ricky Huo, 1650316789090]
++I[Kid Xiong, 1650316790090]
++I[Kid Xiong, 1650316791091]
++I[Kid Xiong, 1650316792092]
+```
+
+To stop your job and delete your FlinkDeployment you can simply:
+
+```bash
+kubectl delete -f seatunnel-flink.yaml
+```
+</TabItem>
+</Tabs>
+
+
+Happy SeaTunneling!
+
+## What's More
+
+Now that you have taken a quick look at SeaTunnel, you can see [connector](/category/connector) to find all sources and sinks SeaTunnel supports,
+or see [deployment](../deployment.mdx) if you want to submit your application to another kind of engine cluster.
diff --git a/versioned_docs/version-2.1.1/start/local.mdx b/versioned_docs/version-2.1.1/start/local.mdx
new file mode 100644
index 00000000..0e57c4b0
--- /dev/null
+++ b/versioned_docs/version-2.1.1/start/local.mdx
@@ -0,0 +1,150 @@
+---
+sidebar_position: 2
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Set Up Locally
+
+## Prepare
+
+Before getting started with the local run, you need to make sure you have already installed the following software, which SeaTunnel requires:
+
+* [Java](https://www.java.com/en/download/) (only JDK 8 is supported for now) installed and `JAVA_HOME` set.
+* Download an engine. You can choose and download one of the engines below as you prefer; see more information about [why we need an engine in SeaTunnel](../faq.md#why-i-should-install-computing-engine-like-spark-or-flink)
+  * Spark: Please [download Spark](https://spark.apache.org/downloads.html) first (**required version >= 2 and < 3.x**). For more information you can
+  see [Getting Started: standalone](https://spark.apache.org/docs/latest/spark-standalone.html#installing-spark-standalone-to-a-cluster)
+  * Flink: Please [download Flink](https://flink.apache.org/downloads.html) first (**required version >= 1.9.0**). For more information you can see [Getting Started: standalone](https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/resource-providers/standalone/overview/)
+
+## Installation
+
+Enter the [seatunnel download page](https://seatunnel.apache.org/download) and download the latest version of the distribution
+package `seatunnel-<version>-bin.tar.gz`
+
+Or you can download it from the terminal:
+
+```shell
+export version="2.1.0"
+wget "https://archive.apache.org/dist/incubator/seatunnel/${version}/apache-seatunnel-incubating-${version}-bin.tar.gz"
+tar -xzvf "apache-seatunnel-incubating-${version}-bin.tar.gz"
+```
+
+<!-- TODO: We should add example module as quick start which is no need for install Spark or Flink -->
+
+## Run SeaTunnel Application
+
+**Configure SeaTunnel**: Change the settings in `config/seatunnel-env.sh`; they are based on the path your engine was installed to in [prepare step two](#prepare).
+Change `SPARK_HOME` if you are using Spark as your engine, or change `FLINK_HOME` if you're using Flink.
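+
+For illustration, the relevant lines in `config/seatunnel-env.sh` might look like the following (the paths below are placeholders; point them at your own installation):
+
+```shell
+# Home directory of the engine downloaded in the prepare step
+SPARK_HOME=/opt/spark
+FLINK_HOME=/opt/flink
+```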
+
+**Run Application with Built-in Configuration**: We already provide out-of-the-box configurations in the `config` directory, which
+you can find when you extract the tarball. You can start the application with the following commands:
+
+<Tabs
+  groupId="engine-type"
+  defaultValue="spark"
+  values={[
+    {label: 'Spark', value: 'spark'},
+    {label: 'Flink', value: 'flink'},
+  ]}>
+<TabItem value="spark">
+
+```shell
+cd "apache-seatunnel-incubating-${version}"
+./bin/start-seatunnel-spark.sh \
+--master local[4] \
+--deploy-mode client \
+--config ./config/spark.streaming.conf.template
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```shell
+cd "apache-seatunnel-incubating-${version}"
+./bin/start-seatunnel-flink.sh \
+--config ./config/flink.streaming.conf.template
+```
+
+</TabItem>
+</Tabs>
+
+**See The Output**: When you run the command, you can see its output in your console or in the Flink UI; use it
+to tell whether the command ran successfully or not.
+
+<Tabs
+  groupId="engine-type"
+  defaultValue="spark"
+  values={[
+    {label: 'Spark', value: 'spark'},
+    {label: 'Flink', value: 'flink'},
+  ]}>
+<TabItem value="spark">
+The SeaTunnel console will print some logs as below:
+
+```shell
+Hello World, SeaTunnel
+Hello World, SeaTunnel
+Hello World, SeaTunnel
+...
+Hello World, SeaTunnel
+```
+
+</TabItem>
+<TabItem value="flink">
+
+The content printed in the TaskManager Stdout log of the Flink Web UI is a two-column record just like below (your
+content may be different because we use a fake source to generate random data):
+
+```shell
+apache, 15
+seatunnel, 30
+incubator, 20
+...
+topLevel, 20
+```
+
+</TabItem>
+</Tabs>
+
+## Explore More Build-in Examples
+
+Our local quick start uses one of the built-in examples in the `config` directory, and we provide more than one out-of-the-box
+example, so feel free to try them and get your hands dirty. All you have to do is change the `--config` option value used in
+[running the application](#run-seaTunnel-application) to the configuration you want to run; here we use the batch
+templates in `config` as examples:
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```shell
+cd "apache-seatunnel-incubating-${version}"
+./bin/start-seatunnel-spark.sh \
+--master local[4] \
+--deploy-mode client \
+--config ./config/spark.batch.conf.template
+```
+
+</TabItem>
+<TabItem value="flink">
+
+```shell
+cd "apache-seatunnel-incubating-${version}"
+./bin/start-seatunnel-flink.sh \
+--config ./config/flink.batch.conf.template
+```
+
+</TabItem>
+</Tabs>
+
+## What's More
+
+Now that you have taken a quick look at SeaTunnel, you can see [connector](/category/connector) to find all
+sources and sinks SeaTunnel supports, or see [deployment](../deployment.mdx) if you want to submit your application to another
+kind of engine cluster.
diff --git a/versioned_docs/version-2.1.1/transform/common-options.mdx b/versioned_docs/version-2.1.1/transform/common-options.mdx
new file mode 100644
index 00000000..766f9070
--- /dev/null
+++ b/versioned_docs/version-2.1.1/transform/common-options.mdx
@@ -0,0 +1,116 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Common Options
+
+:::tip
+
+This transform is supported by both the Spark and Flink engines.
+
+:::
+
+## Transform Plugin common parameters
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| source_table_name | string | no       | -             |
+| result_table_name | string | no       | -             |
+
+</TabItem>
+<TabItem value="flink">
+
+| name              | type   | required | default value |
+| ----------------- | ------ | -------- | ------------- |
+| source_table_name | string | no       | -             |
+| result_table_name | string | no       | -             |
+| field_name        | string | no       | -             |
+
+### field_name [string]
+
+When the data is obtained from the upper-level plugin, you can specify the name of the obtained field, which is convenient for use in subsequent sql plugins.
+
+</TabItem>
+</Tabs>
+
+### source_table_name [string]
+
+When `source_table_name` is not specified, the current plugin processes the data set (`dataset`) output by the previous plugin in the configuration file;
+
+When `source_table_name` is specified, the current plugin processes the data set corresponding to this parameter.
+
+### result_table_name [string]
+
+When `result_table_name` is not specified, the data processed by this plugin will not be registered as a data set (`dataset`), also called a temporary table (`table`), that other plugins can access directly;
+
+When `result_table_name` is specified, the data processed by this plugin will be registered as a data set (`dataset`), also called a temporary table (`table`), that other plugins can access directly. The data set registered here can be accessed by other plugins by specifying `source_table_name` .
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+```bash
+split {
+    source_table_name = "source_view_table"
+    source_field = "message"
+    delimiter = "&"
+    fields = ["field1", "field2"]
+    result_table_name = "result_view_table"
+}
+```
+
+> The `Split` plugin will process the data in the temporary table `source_view_table` and register the processing result as a temporary table named `result_view_table`. This temporary table can be used by any subsequent `Filter` or `Output` plugin by specifying `source_table_name` .
+
+```bash
+split {
+    source_field = "message"
+    delimiter = "&"
+    fields = ["field1", "field2"]
+}
+```
+
+> Note: If `source_table_name` is not configured, the plugin processes the output of the previous plugin in the configuration file
+
+</TabItem>
+<TabItem value="flink">
+
+```bash
+source {
+    FakeSourceStream {
+      result_table_name = "fake_1"
+      field_name = "name,age"
+    }
+    FakeSourceStream {
+      result_table_name = "fake_2"
+      field_name = "name,age"
+    }
+}
+
+transform {
+    sql {
+      source_table_name = "fake_1"
+      sql = "select name from fake_1"
+      result_table_name = "fake_name"
+    }
+}
+```
+
+> If `source_table_name` is not specified, the sql plugin will process the data of `fake_2` , and if it is set to `fake_1` , it will process the data of `fake_1` .
+
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/versioned_docs/version-2.1.1/transform/json.md b/versioned_docs/version-2.1.1/transform/json.md
new file mode 100644
index 00000000..3068fe7a
--- /dev/null
+++ b/versioned_docs/version-2.1.1/transform/json.md
@@ -0,0 +1,195 @@
+# Json
+
+## Description
+
+JSON parsing of the specified fields of the original data set
+
+:::tip
+
+This transform is **ONLY** supported by Spark.
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| source_field   | string | no       | raw_message   |
+| target_field   | string | no       | __root__      |
+| schema_dir     | string | no       | -             |
+| schema_file    | string | no       | -             |
+| common-options | string | no       | -             |
+
+### source_field [string]
+
+Source field, if not configured, the default is `raw_message`
+
+### target_field [string]
+
+The target field. If it is not configured, the default is `__root__` , and the result of the JSON parsing will be placed at the top level of the `Dataframe`
+
+### schema_dir [string]
+
+Schema directory. If not configured, the default is `$seatunnelRoot/plugins/json/files/schemas/`
+
+### schema_file [string]
+
+The schema file name. If it is not configured, the default is empty, that is, no structure is specified and the system infers it from the input of the data source.
+
+### common options [string]
+
+Transform plugin common parameters, please refer to [Transform Plugin](common-options.mdx) for details
+
+## Schema Use cases
+
+- `json schema` usage scenarios
+
+A single task may have multiple data sources containing JSON data with different schemas. For example, the JSON from Kafka `topicA` is
+
+```json
+{
+  "A": "a_val",
+  "B": "b_val"
+}
+```
+
+while the JSON from `topicB` is
+
+```json
+{
+  "C": "c_val",
+  "D": "d_val"
+}
+```
+
+When running the `Transform` , you need to merge the data of `topicA` and `topicB` into one wide table for the calculation. You can specify a `schema` whose content is:
+
+```json
+{
+  "A": "a_val",
+  "B": "b_val",
+  "C": "c_val",
+  "D": "d_val"
+}
+```
+
+Then the merged output of `topicA` and `topicB` is:
+
+```bash
++-----+-----+-----+-----+
+|A    |B    |C    |D    |
++-----+-----+-----+-----+
+|a_val|b_val|null |null |
+|null |null |c_val|d_val|
++-----+-----+-----+-----+
+```
+
+## Examples
+
+### Do not use `target_field`
+
+```bash
+json {
+    source_field = "message"
+}
+```
+
+- Source
+
+```bash
++----------------------------+
+|message                   |
++----------------------------+
+|{"name": "ricky", "age": 24}|
+|{"name": "gary", "age": 28} |
++----------------------------+
+```
+
+- Sink
+
+```bash
++----------------------------+---+-----+
+|message                   |age|name |
++----------------------------+---+-----+
+|{"name": "gary", "age": 28} |28 |gary |
+|{"name": "ricky", "age": 23}|23 |ricky|
++----------------------------+---+-----+
+```
+
+### Use `target_field`
+
+```bash
+json {
+    source_field = "message"
+    target_field = "info"
+}
+```
+
+- Source
+
+```bash
++----------------------------+
+|message                   |
++----------------------------+
+|{"name": "ricky", "age": 24}|
+|{"name": "gary", "age": 28} |
++----------------------------+
+```
+
+- Sink
+
+```bash
++----------------------------+----------+
+|message                   |info      |
++----------------------------+----------+
+|{"name": "gary", "age": 28} |[28,gary] |
+|{"name": "ricky", "age": 23}|[23,ricky]|
++----------------------------+----------+
+```
+
+> The results of JSON processing support SQL statements such as `select * from where info.age = 23`
+
+### Use `schema_file`
+
+```bash
+json {
+    source_field = "message"
+    schema_file = "demo.json"
+}
+```
+
+- Schema
+
+Place the following content in `~/seatunnel/plugins/json/files/schemas/demo.json` on the Driver node:
+
+```json
+{
+   "name": "demo",
+   "age": 24,
+   "city": "LA"
+}
+```
+
+- Source
+
+```bash
++----------------------------+
+|message                   |
++----------------------------+
+|{"name": "ricky", "age": 24}|
+|{"name": "gary", "age": 28} |
++----------------------------+
+```
+
+- Sink
+
+```bash
++----------------------------+---+-----+-----+
+|message                     |age|name |city |
++----------------------------+---+-----+-----+
+|{"name": "gary", "age": 28} |28 |gary |null |
+|{"name": "ricky", "age": 23}|23 |ricky|null |
++----------------------------+---+-----+-----+
+```
+
+> If you use `cluster mode` for deployment, make sure that the `json schemas` directory is packaged in `plugins.tar.gz`
diff --git a/versioned_docs/version-2.1.1/transform/split.mdx b/versioned_docs/version-2.1.1/transform/split.mdx
new file mode 100644
index 00000000..ecfe3871
--- /dev/null
+++ b/versioned_docs/version-2.1.1/transform/split.mdx
@@ -0,0 +1,122 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Split
+
+## Description
+
+Defines a string splitting function that splits the specified field; it can also be used as a UDF in the Sql plugin.
+
+:::tip
+
+This transform is supported by both the Spark and Flink engines.
+
+:::
+
+## Options
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| separator      | string | no       | " "      |
+| fields         | array  | yes      | -             |
+| source_field   | string | no       | raw_message   |
+| target_field   | string | no       | *root*        |
+| common-options | string | no       | -             |
+
+### separator [string]
+
+The separator; the input string is split according to it. The default separator is a space (`" "`).
+Note: if you use special characters in the separator, you need to escape them, e.g. `"\\|"`
+
+### source_field [string]
+
+The source field of the string before being split, if not configured, the default is `raw_message`
+
+### target_field [string]
+
+`target_field` specifies where the split fields are added in the Event. If it is not configured, the default is `_root_` , that is, all split fields will be added to the top level of the Event. If a specific field is specified, the split fields will be added under that field.
+
+</TabItem>
+<TabItem value="flink">
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| separator      | string | no       | ,             |
+| fields         | array  | yes      | -             |
+| common-options | string | no       | -             |
+
+### separator [string]
+
+The specified delimiter, the default is `,`
+
+</TabItem>
+</Tabs>
+
+### fields [list]
+
+The list of field names after splitting; specify the field name of each split string in order. If `fields` is longer than the split result, the extra fields are assigned null values.
+
+### common options [string]
+
+Transform plugin common parameters, please refer to [Transform Plugin](common-options.mdx) for details
+
+## Examples
+
+<Tabs
+    groupId="engine-type"
+    defaultValue="spark"
+    values={[
+        {label: 'Spark', value: 'spark'},
+        {label: 'Flink', value: 'flink'},
+    ]}>
+<TabItem value="spark">
+
+Split the `message` field in the source data by `&`; you can then use `field1` or `field2` as the key to get the corresponding value
+
+```bash
+split {
+    source_field = "message"
+    separator = "&"
+    fields = ["field1", "field2"]
+}
+```
+
+Split the `message` field in the source data by `,` into the target field `info`; you can then use `info.field1` or `info.field2` as the key to get the corresponding value
+
+```bash
+split {
+    source_field = "message"
+    target_field = "info"
+    separator = ","
+    fields = ["field1", "field2"]
+}
+```
+
+</TabItem>
+<TabItem value="flink">
+
+</TabItem>
+</Tabs>
+
+Use `Split` as a UDF in the Sql plugin.
+
+```bash
+  # This just creates a UDF called split
+  Split{
+    separator = "#"
+    fields = ["name","age"]
+  }
+  # Use the split function (confirm that the fake table exists)
+  sql {
+    sql = "select * from (select raw_message,split(raw_message) as info_row from fake) t1"
+  }
+```
diff --git a/versioned_docs/version-2.1.1/transform/sql.md b/versioned_docs/version-2.1.1/transform/sql.md
new file mode 100644
index 00000000..9f8003d4
--- /dev/null
+++ b/versioned_docs/version-2.1.1/transform/sql.md
@@ -0,0 +1,60 @@
+# Sql
+
+## Description
+
+Use SQL to process data; the engine's UDF functions are supported.
+
+:::tip
+
+This transform is supported by both the Spark and Flink engines.
+
+:::
+
+## Options
+
+| name           | type   | required | default value |
+| -------------- | ------ | -------- | ------------- |
+| sql            | string | yes      | -             |
+| common-options | string | no       | -             |
+
+### sql [string]
+
+The SQL statement; the table name used in the SQL is the `result_table_name` configured in the `Source` or `Transform` plugin
+
+### common options [string]
+
+Transform plugin common parameters, please refer to [Transform Plugin](common-options.mdx) for details
+
+## Examples
+
+### Simple Select
+
+Use the SQL plugin for field deletion. Only the `username` and `address` fields are retained, and the remaining fields will be discarded. `user_info` is the `result_table_name` configured by the previous plugin
+
+```bash
+sql {
+    sql = "select username, address from user_info",
+}
+```
+
+### Use UDF
+
+Use the SQL plugin for data processing; use the `substring` function to extract a prefix of the `telephone` field
+
+```bash
+sql {
+    sql = "select substring(telephone, 0, 10) from user_info",
+}
+```
+
+### Use UDAF
+
+Use the SQL plugin for data aggregation; use the `avg` function to aggregate the original data set and obtain the average value of the `age` field
+
+```bash
+sql {
+    sql = "select avg(age) from user_info",
+    table_name = "user_info"
+}
+```
+
diff --git a/versioned_sidebars/version-2.1.1-sidebars.json b/versioned_sidebars/version-2.1.1-sidebars.json
new file mode 100644
index 00000000..775310a8
--- /dev/null
+++ b/versioned_sidebars/version-2.1.1-sidebars.json
@@ -0,0 +1,106 @@
+{
+  "docs": [
+    {
+      "type": "category",
+      "label": "Introduction",
+      "items": [
+        "intro/about",
+        "intro/why",
+        "intro/history"
+      ]
+    },
+    {
+      "type": "category",
+      "label": "Quick Start",
+      "link": {
+        "type": "generated-index",
+        "title": "Quick Start for SeaTunnel",
+        "description": "In this section, you could learn how to get up and running Apache SeaTunnel in both locally or in Docker environment.",
+        "slug": "/category/start",
+        "keywords": [
+          "start"
+        ],
+        "image": "/img/favicon.ico"
+      },
+      "items": [
+        "start/local",
+        "start/docker",
+        "start/kubernetes"
+      ]
+    },
+    {
+      "type": "category",
+      "label": "Connector",
+      "items": [
+        "connector/config-example",
+        {
+          "type": "category",
+          "label": "Source",
+          "link": {
+            "type": "generated-index",
+            "title": "Source of SeaTunnel",
+            "description": "List all source supported Apache SeaTunnel for now.",
+            "slug": "/category/source",
+            "keywords": [
+              "source"
+            ],
+            "image": "/img/favicon.ico"
+          },
+          "items": [
+            {
+              "type": "autogenerated",
+              "dirName": "connector/source"
+            }
+          ]
+        },
+        {
+          "type": "category",
+          "label": "Sink",
+          "link": {
+            "type": "generated-index",
+            "title": "Source of SeaTunnel",
+            "description": "List all sink supported Apache SeaTunnel for now.",
+            "slug": "/category/sink",
+            "keywords": [
+              "sink"
+            ],
+            "image": "/img/favicon.ico"
+          },
+          "items": [
+            {
+              "type": "autogenerated",
+              "dirName": "connector/sink"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "type": "category",
+      "label": "Transform",
+      "items": [
+        "transform/common-options",
+        "transform/sql",
+        "transform/split",
+        "transform/json"
+      ]
+    },
+    {
+      "type": "category",
+      "label": "Command",
+      "items": [
+        "command/usage"
+      ]
+    },
+    "deployment",
+    {
+      "type": "category",
+      "label": "Contribution",
+      "items": [
+        "contribution/setup",
+        "contribution/new-license"
+      ]
+    },
+    "faq"
+  ]
+}
diff --git a/versions.json b/versions.json
index d1be432c..bae9830d 100644
--- a/versions.json
+++ b/versions.json
@@ -1,4 +1,5 @@
 [
+  "2.1.1",
   "2.1.0",
   "1.x"
 ]