Posted to commits@seatunnel.apache.org by zh...@apache.org on 2022/04/06 12:53:54 UTC

[incubator-seatunnel-website] branch main updated: Migrating 1.X version documentation to Apache repository (#104)

This is an automated email from the ASF dual-hosted git repository.

zhongjiajie pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-seatunnel-website.git


The following commit(s) were added to refs/heads/main by this push:
     new cdf618a8 Migrating 1.X version documentation to Apache repository (#104)
cdf618a8 is described below

commit cdf618a8af0544f94fd5b757f8f2ce1bceb8940c
Author: Kirs <ac...@163.com>
AuthorDate: Wed Apr 6 20:53:49 2022 +0800

    Migrating 1.X version documentation to Apache repository (#104)
    
    Co-authored-by: Jiajie Zhong <zh...@hotmail.com>
---
 docusaurus.config.js                               |   6 +-
 src/pages/versions/config.json                     |   7 +
 versioned_docs/version-1.x/configuration/base.md   | 114 ++++++++
 .../version-1.x/configuration/filter-plugin.md     |  45 +++
 .../configuration/filter-plugins/Add.md            |  34 +++
 .../configuration/filter-plugins/Checksum.md       |  41 +++
 .../configuration/filter-plugins/Convert.md        |  38 +++
 .../configuration/filter-plugins/Date.md           |  91 ++++++
 .../configuration/filter-plugins/Drop.md           |  31 +++
 .../configuration/filter-plugins/Grok.md           |  77 +++++
 .../configuration/filter-plugins/Join.md           |  55 ++++
 .../configuration/filter-plugins/Json.md           | 184 ++++++++++++
 .../version-1.x/configuration/filter-plugins/Kv.md | 126 +++++++++
 .../configuration/filter-plugins/Lowercase.md      |  33 +++
 .../configuration/filter-plugins/Remove.md         |  29 ++
 .../configuration/filter-plugins/Repartition.md    |  28 ++
 .../configuration/filter-plugins/Replace.md        |  51 ++++
 .../configuration/filter-plugins/Sample.md         |  34 +++
 .../configuration/filter-plugins/Split.md          |  63 +++++
 .../configuration/filter-plugins/Sql.md            |  54 ++++
 .../configuration/filter-plugins/Table.md          |  71 +++++
 .../configuration/filter-plugins/Truncate.md       |  38 +++
 .../configuration/filter-plugins/Uppercase.md      |  33 +++
 .../configuration/filter-plugins/Uuid.md           |  28 ++
 .../configuration/filter-plugins/Watermark.md      |  33 +++
 .../version-1.x/configuration/input-plugin.md      |  31 +++
 .../configuration/input-plugins/Alluxio.md         |  50 ++++
 .../configuration/input-plugins/Fake.md            |  88 ++++++
 .../configuration/input-plugins/File.md            |  39 +++
 .../configuration/input-plugins/Hdfs.md            |  35 +++
 .../configuration/input-plugins/Jdbc.md            |  64 +++++
 .../configuration/input-plugins/KafkaStream.md     |  65 +++++
 .../configuration/input-plugins/MySQL.md           |  58 ++++
 .../configuration/input-plugins/RedisStream.md     |  75 +++++
 .../version-1.x/configuration/input-plugins/S3.md  |  27 ++
 .../configuration/input-plugins/Socket.md          |  24 ++
 .../version-1.x/configuration/output-plugin.md     |  32 +++
 .../configuration/output-plugins/Alluxio.md        |  81 ++++++
 .../configuration/output-plugins/Clickhouse.md     | 110 ++++++++
 .../configuration/output-plugins/Elasticsearch.md  |  77 +++++
 .../configuration/output-plugins/File.md           |  66 +++++
 .../configuration/output-plugins/Hdfs.md           |  67 +++++
 .../configuration/output-plugins/Jdbc.md           |  61 ++++
 .../configuration/output-plugins/Kafka.md          |  41 +++
 .../configuration/output-plugins/MySQL.md          |  54 ++++
 .../version-1.x/configuration/output-plugins/S3.md |  66 +++++
 .../configuration/output-plugins/Stdout.md         |  33 +++
 versioned_docs/version-1.x/deployment.md           |  58 ++++
 versioned_docs/version-1.x/developing-plugin.md    | 309 +++++++++++++++++++++
 versioned_docs/version-1.x/installation.md         |  33 +++
 versioned_docs/version-1.x/internal.md             |  33 +++
 versioned_docs/version-1.x/introduction.md         | 178 ++++++++++++
 versioned_docs/version-1.x/monitoring.md           | 291 +++++++++++++++++++
 versioned_docs/version-1.x/quick-start.md          | 130 +++++++++
 versioned_sidebars/version-1.x-sidebars.json       |  73 +++++
 versions.json                                      |   3 +-
 56 files changed, 3690 insertions(+), 6 deletions(-)

diff --git a/docusaurus.config.js b/docusaurus.config.js
index 4722f464..6f8aa048 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -89,13 +89,9 @@ const config = {
                             to: `docs/${versions[0]}/introduction`,
                         },
                         ...versions.slice(1).map((version) => ({
-                            label: version,
+                            label: (version === "1.x") ? "1.x(Not Apache Release)" : version,
                             to: `docs/${version}/introduction`,
                         })),
-                        {
-                            label: "1.x(Not apache release)",
-                            to: "https://interestinglab.github.io/seatunnel-docs/#/zh-cn/v1/"
-                        },
                         {
                             label: "Next",
                             to: "/docs/intro/about",
diff --git a/src/pages/versions/config.json b/src/pages/versions/config.json
index 70cfc89b..fc7d47f5 100644
--- a/src/pages/versions/config.json
+++ b/src/pages/versions/config.json
@@ -69,6 +69,13 @@
           "downloadUrl": "https://github.com/apache/incubator-seatunnel/releases/tag/2.1.0",
           "sourceTag": "2.1.0"
         }
+      ],
+      "historyData1.x": [
+        {
+          "versionLabel": "1.x",
+          "docUrl": "/docs/1.x/introduction",
+          "sourceTag": "1.x"
+        }
       ]
     }
   }
diff --git a/versioned_docs/version-1.x/configuration/base.md b/versioned_docs/version-1.x/configuration/base.md
new file mode 100644
index 00000000..cf296de1
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/base.md
@@ -0,0 +1,114 @@
+# General configuration
+
+## Core idea
+
+* Row is the logical representation of a piece of data in seatunnel and the basic unit of data processing. When a Filter processes data, all data is mapped to Rows.
+
+* Field is a field of Row. Row can contain nested levels of fields.
+
+* raw_message refers to the `raw_message` field of a Row, which holds the raw data ingested from the input.
+
+* __root__ refers to the top field level of a Row, and is often used to specify that new fields generated during data processing should be stored as top-level fields of the Row.
+
+
+---
+
+## config file
+
+A complete seatunnel configuration includes `spark`, `input`, `filter`, `output`, namely:
+
+````
+spark {
+    ...
+}
+
+input {
+    ...
+}
+
+filter {
+    ...
+}
+
+output {
+    ...
+}
+
+````
+
+* `spark` contains the Spark-related configuration.
+
+For the configurable Spark parameters, see:
+[Spark Configuration](https://spark.apache.org/docs/latest/configuration.html#available-properties).
+Note that `master` and `deploy-mode` cannot be configured here; they must be specified in the seatunnel startup script.
+
+* `input` can configure any input plugin and its parameters, and the specific parameters vary with different input plugins.
+
+* `filter` can configure any filter plugin and its parameters, and the specific parameters vary with different filter plugins.
+
+Multiple plugins within `filter` form a data processing pipeline in their configuration order: the output of the previous filter is the input of the next filter.
+
+* `output` can configure any output plugin and its parameters, and the specific parameters vary with different output plugins.
+
+The data processed by `filter` will be sent to each plugin configured in `output`.
+
+
+---
+
+## Configuration file example
+
+An example is as follows:
+
+> In the configuration, lines beginning with `#` are comments.
+
+````
+spark {
+  # You can set spark configuration here
+  # seatunnel defined streaming batch duration in seconds
+  spark.streaming.batchDuration = 5
+
+  # see available properties defined by spark: https://spark.apache.org/docs/latest/configuration.html#available-properties
+  spark.app.name = "seatunnel"
+  spark.executor.instances = 2
+  spark.executor.cores = 1
+  spark.executor.memory = "1g"
+}
+
+input {
+  # This is an example input plugin **only for test and demonstrate the feature input plugin**
+  fakestream {
+    content = ["Hello World, InterestingLab"]
+    rate = 1
+  }
+
+
+  # If you would like to get more information about how to configure seatunnel and see full list of input plugins,
+  # please go to https://interestinglab.github.io/seatunnel-docs/#/en-us/v1/configuration/base
+}
+
+filter {
+  split {
+    fields = ["msg", "name"]
+    delimiter = ","
+  }
+
+  # If you would like to get more information about how to configure seatunnel and see full list of filter plugins,
+  # please go to https://interestinglab.github.io/seatunnel-docs/#/en-us/v1/configuration/base
+}
+
+output {
+  stdout {}
+
+
+  # If you would like to get more information about how to configure seatunnel and see full list of output plugins,
+  # please go to https://interestinglab.github.io/seatunnel-docs/#/en-us/v1/configuration/base
+}
+````
+
+For other configurations, please refer to:
+
+[Configuration example 1: Streaming computing](https://github.com/InterestingLab/seatunnel/blob/master/config/streaming.conf.template)
+
+[Configuration example 2: Offline batch processing](https://github.com/InterestingLab/seatunnel/blob/master/config/batch.conf.template)
+
+[Configuration example 3: A flexible multi-pipeline data processing job](https://github.com/InterestingLab/seatunnel/blob/master/config/complex.conf.template)
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/configuration/filter-plugin.md b/versioned_docs/version-1.x/configuration/filter-plugin.md
new file mode 100644
index 00000000..e5b38ce9
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugin.md
@@ -0,0 +1,45 @@
+# Filter plugin
+
+### Filter plugin general parameters
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [source_table_name](#source_table_name-string) | string | no | - |
+| [result_table_name](#result_table_name-string) | string | no | - |
+
+
+##### source_table_name [string]
+
+When `source_table_name` is not specified, the current plugin processes the dataset output by the previous plugin in the configuration file;
+
+When `source_table_name` is specified, the current plugin processes the dataset corresponding to this parameter.
+
+##### result_table_name [string]
+
+When `result_table_name` is not specified, the data processed by this plugin will not be registered as a dataset (i.e. a temporary table) that other plugins can access directly;
+
+When `result_table_name` is specified, the data processed by this plugin will be registered as a dataset (i.e. a temporary table) that other plugins can access directly. Other plugins can access the dataset registered here by specifying `source_table_name`.
+
+### Usage example
+
+````
+split {
+    source_table_name = "view_table_1"
+    source_field = "message"
+    delimiter = "&"
+    fields = ["field1", "field2"]
+    result_table_name = "view_table_2"
+}
+````
+
+> The `Split` plugin will process the data in the temporary table `view_table_1` and register the processing result as a temporary table named `view_table_2`; this temporary table can then be used by any subsequent `Filter` or `Output` plugin by specifying it as `source_table_name`.
+
+````
+split {
+    source_field = "message"
+    delimiter = "&"
+    fields = ["field1", "field2"]
+}
+````
+
+> Without `source_table_name` configured, the `Split` plugin will read the dataset passed by the previous plugin and pass it to the next plugin.
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Add.md b/versioned_docs/version-1.x/configuration/filter-plugins/Add.md
new file mode 100644
index 00000000..96e2c9ed
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Add.md
@@ -0,0 +1,34 @@
+## Filter plugin : Add
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Add a field with a fixed value to Rows.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [target_field](#target_field-string) | string | yes | - |
+| [value](#value-string) | string | yes | - |
+
+##### target_field [string]
+
+New field name.
+
+##### value [string]
+
+New field value.
+
+### Examples
+
+```
+add {
+    # target_field is required; "new_field" is an illustrative name
+    target_field = "new_field"
+    value = "1"
+}
+```
+
+> Add a field named `new_field` with the fixed value "1"
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Checksum.md b/versioned_docs/version-1.x/configuration/filter-plugins/Checksum.md
new file mode 100644
index 00000000..e767a5c8
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Checksum.md
@@ -0,0 +1,41 @@
+## Filter plugin : Checksum
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Calculate the checksum (default algorithm is SHA1) of a specific field and add a new field with the checksum value.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [method](#method-string) | string | no | SHA1 |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | checksum |
+
+##### method [string]
+
+Checksum algorithm; currently supports SHA1, MD5 and CRC32.
+
+##### source_field [string]
+
+Source field
+
+##### target_field [string]
+
+Target field
+
+### Examples
+
+```
+checksum {
+    source_field = "deviceId"
+    target_field = "device_crc32"
+    method = "CRC32"
+}
+```
+
+> Get CRC32 checksum from `deviceId`, and set it to `device_crc32`
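+
+A second example (the field name `email` is only illustrative) uses MD5 and, since `target_field` is omitted, writes the result to the default `checksum` field:
+
+```
+checksum {
+    source_field = "email"
+    method = "MD5"
+}
+```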
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Convert.md b/versioned_docs/version-1.x/configuration/filter-plugins/Convert.md
new file mode 100644
index 00000000..fd36c351
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Convert.md
@@ -0,0 +1,38 @@
+## Filter plugin : Convert
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Convert a field’s value to a different type, such as converting a string to an integer.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [new_type](#new_type-string) | string | yes | - |
+| [source_field](#source_field-string) | string | yes | - |
+
+##### new_type [string]
+
+Conversion type, supports `string`, `integer`, `long`, `float`, `double` and `boolean` now.
+
+##### source_field [string]
+
+Source field.
+
+
+### Examples
+
+```
+convert {
+    source_field = "age"
+    new_type = "integer"
+}
+```
+
+> Convert the `age` field to `integer` type
+
+
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Date.md b/versioned_docs/version-1.x/configuration/filter-plugins/Date.md
new file mode 100644
index 00000000..a2266efd
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Date.md
@@ -0,0 +1,91 @@
+## Filter plugin : Date
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+The date filter is used for parsing dates from a specified field.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [default_value](#default_value-string) | string | no | ${now} |
+| [locale](#locale-string) | string | no | Locale.US |
+| [source_field](#source_field-string) | string | no | \_\_root\_\_ |
+| [source_time_format](#source_time_format-string) | string | no | UNIX_MS |
+| [target_field](#target_field-string) | string | no | datetime |
+| [target_time_format](#target_time_format-string) | string | no | `yyyy/MM/dd HH:mm:ss` |
+| [time_zone](#time_zone-string) | string | no | - |
+
+##### default_value [string]
+
+If the date conversion fails, the current time (`${now}`) will be used in the specified format.
+
+
+##### locale [string]
+
+Locale of source field.
+
+##### source_field [string]
+
+Source field, if not configured, the current time will be used.
+
+##### source_time_format [string]
+
+Source field time format; currently supports UNIX (10-digit second timestamp), UNIX_MS (13-digit millisecond timestamp) and the `SimpleDateFormat` format. The commonly used time format symbols are listed below:
+
+
+| Symbol | Description |
+| --- | --- |
+| y | Year |
+| M | Month |
+| d | Day of month |
+| H | Hour in day (0-23) |
+| m | Minute in hour |
+| s | Second in minute |
+
+The detailed time format syntax: [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html).
+
+
+
+##### target_field [string]
+
+Target field, default is `datetime`.
+
+##### target_time_format [string]
+
+Target field time format. The detailed time format syntax: [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html).
+
+
+##### time_zone [string]
+
+Timezone of source field
+
+
+### Examples
+
+```
+date {
+    source_field = "timestamp"
+    target_field = "date"
+    source_time_format = "UNIX"
+    target_time_format = "yyyy/MM/dd"
+}
+```
+
+> Convert the `timestamp` field from UNIX timestamp to the `yyyy/MM/dd` format.
+
+```
+date {
+    source_field = "httpdate"
+    target_field = "datetime"
+    source_time_format = "dd/MMM/yyyy:HH:mm:ss Z"
+    target_time_format = "yyyy/MM/dd HH:mm:ss"
+}
+```
+
+
+> Convert the `httpdate` field from `dd/MMM/yyyy:HH:mm:ss Z` format to the `yyyy/MM/dd HH:mm:ss` format
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Drop.md b/versioned_docs/version-1.x/configuration/filter-plugins/Drop.md
new file mode 100644
index 00000000..04cea1a3
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Drop.md
@@ -0,0 +1,31 @@
+## Filter plugin : Drop
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Drop Rows that match the condition
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [condition](#condition-string) | string | yes | - |
+
+##### condition [string]
+
+Conditional expression; Rows that match this expression will be dropped. Expressions allowed in the WHERE clause of SQL can be used, such as `name = 'grayelephant'` or `status = 200 AND resp_time > 100`.
+
+
+### Examples
+
+```
+drop {
+    condition = "status = '200'"
+}
+```
+
+> Rows will be dropped if status is 200
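+
+A compound condition, as described above, can be used the same way (the field names are only illustrative):
+
+```
+drop {
+    condition = "status = 200 AND resp_time > 100"
+}
+```
+
+> Rows will be dropped if `status` is 200 and `resp_time` is greater than 100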
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Grok.md b/versioned_docs/version-1.x/configuration/filter-plugins/Grok.md
new file mode 100644
index 00000000..ee1284c0
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Grok.md
@@ -0,0 +1,77 @@
+## Filter plugin : Grok
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Parse arbitrary text into structured data with columns using Grok patterns. Please have a look at the [available grok patterns](https://github.com/InterestingLab/seatunnel/blob/master/plugins/grok/files/grok-patterns/grok-patterns).
+
+You can also go to [http://grokdebug.herokuapp.com](http://grokdebug.herokuapp.com) to debug grok patterns if you need some hint for grok pattern syntax.
+
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [named_captures_only](#named_captures_only-boolean) | boolean | no | true |
+| [pattern](#pattern-string) | string | yes | - |
+| [patterns_dir](#patterns_dir-string) | string | no | - |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | \_\_root\_\_ |
+
+##### named_captures_only [boolean]
+
+If true, only store named captures from grok.
+
+##### pattern [string]
+
+Grok pattern.
+
+##### patterns_dir [string]
+
+The directory of pattern files. seatunnel ships by default with a bunch of [patterns](https://github.com/InterestingLab/seatunnel/tree/master/plugins/grok/files/grok-patterns), so you don’t necessarily need to configure this unless you want to add additional patterns.
+
+
+##### source_field [string]
+
+Source field.
+
+##### target_field [string]
+
+Target field.
+
+### Example
+
+```
+grok {
+    source_field = "raw_message"
+    pattern = "%{WORD:name} is %{WORD:gender}, %{NUMBER:age} years old and weighs %{NUMBER:weight} kilograms"
+    target_field = "info_detail"
+}
+```
+
+* **Input**
+
+```
++----------------------------------------------------+
+|raw_message                                         |
++----------------------------------------------------+
+|gary is male, 25 years old and weighs 68.5 kilograms|
+|gary is male, 25 years old and weighs 68.5 kilograms|
++----------------------------------------------------+
+```
+
+* **Output**
+
+```
++----------------------------------------------------+------------------------------------------------------------+
+|raw_message                                         |info_detail                                                 |
++----------------------------------------------------+------------------------------------------------------------+
+|gary is male, 25 years old and weighs 68.5 kilograms|Map(age -> 25, gender -> male, name -> gary, weight -> 68.5)|
+|gary is male, 25 years old and weighs 68.5 kilograms|Map(age -> 25, gender -> male, name -> gary, weight -> 68.5)|
++----------------------------------------------------+------------------------------------------------------------+
+
+```
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Join.md b/versioned_docs/version-1.x/configuration/filter-plugins/Join.md
new file mode 100644
index 00000000..d8916798
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Join.md
@@ -0,0 +1,55 @@
+## Filter plugin : Join
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.3.0
+
+### Description
+
+Joining a streaming Dataset/DataFrame with a static Dataset/DataFrame.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [table_name](#table_name-string) | string | yes | - |
+
+##### source_field [string]
+
+Source field, default is `raw_message`.
+
+##### table_name [string]
+
+Static Dataset/DataFrame name.
+
+### Examples
+
+```
+input {
+  fakestream {
+    content = ["Hello World,seatunnel"]
+    rate = 1
+  }
+
+  mysql {
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "project_info"
+    table_name = "user_info"
+    user = "username"
+    password = "password"
+  }
+}
+
+filter {
+  split {
+    fields = ["msg", "project"]
+    delimiter = ","
+  }
+
+  join {
+    table_name = "user_info"
+    source_field = "project"
+  }
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Json.md b/versioned_docs/version-1.x/configuration/filter-plugins/Json.md
new file mode 100644
index 00000000..d2a1eaed
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Json.md
@@ -0,0 +1,184 @@
+## Filter plugin : Json
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+It takes an existing field which contains a JSON string and extracts its fields.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [schema_dir](#schema_dir-string) | string | no | $seatunnelRoot/plugins/json/files/schemas/ |
+| [schema_file](#schema_file-string) | string | no | - |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | \_\_root\_\_ |
+
+##### source_field [string]
+
+Source field, default is `raw_message`.
+
+##### target_field [string]
+
+New field name.
+
+##### schema_dir [string]
+
+Json schema absolute directory path, default is `$seatunnelRoot/plugins/json/files/schemas/`.
+
+##### schema_file [string]
+
+Json schema file name, if not set, the system will infer the schema from input source.
+
+### Use cases
+
+1. `json schema` **use case**
+
+There might be multiple input json schemas in a single job, e.g. the schema in topicA of kafka can be:
+
+```json
+{
+  "A": "a_val",
+  "B": "b_val"
+}
+```
+
+the schema of topicB can be:
+
+```json
+{
+  "C": "c_val",
+  "D": "d_val"
+}
+```
+
+If we need to combine the two schemas and output them as a wide table, we can specify a schema with the content below:
+
+```json
+{
+  "A": "a_val",
+  "B": "b_val",
+  "C": "c_val",
+  "D": "d_val"
+}
+```
+
+then the output of topicA and topicB would be:
+
+```
++-----+-----+-----+-----+
+|A    |B    |C    |D    |
++-----+-----+-----+-----+
+|a_val|b_val|null |null |
+|null |null |c_val|d_val|
++-----+-----+-----+-----+
+```
+
+
+### Examples
+
+1. Without `target_field`
+
+    ```
+    json {
+        source_field = "message"
+    }
+    ```
+
+    * **Input**
+
+    ```
+    +----------------------------+
+    |message                   |
+    +----------------------------+
+    |{"name": "ricky", "age": 24}|
+    |{"name": "gary", "age": 28} |
+    +----------------------------+
+    ```
+
+    * **Output**
+
+    ```
+    +----------------------------+---+-----+
+    |message                   |age|name |
+    +----------------------------+---+-----+
+    |{"name": "gary", "age": 28} |28 |gary |
+    |{"name": "ricky", "age": 23}|23 |ricky|
+    +----------------------------+---+-----+
+    ```
+
+2. With `target_field`
+
+    ```
+    json {
+        source_field = "message"
+        target_field = "info"
+    }
+    ```
+
+    * **Input**
+
+    ```
+    +----------------------------+
+    |message                   |
+    +----------------------------+
+    |{"name": "ricky", "age": 24}|
+    |{"name": "gary", "age": 28} |
+    +----------------------------+
+    ```
+
+    * **Output**
+
+    ```
+    +----------------------------+----------+
+    |message                   |info      |
+    +----------------------------+----------+
+    |{"name": "gary", "age": 28} |[28,gary] |
+    |{"name": "ricky", "age": 23}|[23,ricky]|
+    +----------------------------+----------+
+
+    ```
+
+3. With `schema_file`
+    ```
+    json {
+        source_field = "message"
+        schema_file = "demo.json"
+    }
+    ```
+    
+    * **Schema**
+    
+    Set the content of `/opt/seatunnel/plugins/json/files/schemas/demo.json` on the `Driver node` as below:
+    
+    ```json
+    {
+       "name": "demo",
+       "age": 24,
+       "city": "LA"
+    }
+    ```
+    
+    * **Input**
+    ```
+    +----------------------------+
+    |message                   |
+    +----------------------------+
+    |{"name": "ricky", "age": 24}|
+    |{"name": "gary", "age": 28} |
+    +----------------------------+
+    ```
+    
+    * **Output**
+
+    ```
+    +----------------------------+---+-----+-----+
+    |message                     |age|name |city |
+    +----------------------------+---+-----+-----+
+    |{"name": "gary", "age": 28} |28 |gary |null |
+    |{"name": "ricky", "age": 23}|23 |ricky|null |
+    +----------------------------+---+-----+-----+
+    ```
+
+    > If deployed in `cluster` mode, make sure the json schemas directory is packed into plugins.tar.gz
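+
+4. With `schema_dir`
+
+    The schema directory can also be set explicitly; the path below simply spells out the default location used in the previous example (assuming seatunnel is installed under `/opt/seatunnel`):
+
+    ```
+    json {
+        source_field = "message"
+        schema_dir = "/opt/seatunnel/plugins/json/files/schemas/"
+        schema_file = "demo.json"
+    }
+    ```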
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Kv.md b/versioned_docs/version-1.x/configuration/filter-plugins/Kv.md
new file mode 100644
index 00000000..c7e72505
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Kv.md
@@ -0,0 +1,126 @@
+## Filter plugin : Kv
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Extract all key-value pairs from the specified string field using the configured `field_split`; this is often used to parse URL parameters.
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [default_values](#default_values-array) | array | no | [] |
+| [exclude_fields](#exclude_fields-array) | array | no | [] |
+| [field_prefix](#field_prefix-string) | string | no |  |
+| [field_split](#field_split-string) | string | no | & |
+| [include_fields](#include_fields-array) | array | no | [] |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | \_\_root\_\_ |
+| [value_split](#value_split-string) | string | no | = |
+
+##### default_values [array]
+
+Default values can be set via `default_values` in the form `key=default_value` (key and value are separated by `=`).
+
+Multiple default values are specified as follows: `default_values = ["mykey1=123", "mykey2=seatunnel"]`
+
+##### exclude_fields [array]
+
+Fields in the `exclude_fields` will be abandoned.
+
+##### field_prefix [string]
+
+A string to prepend to all of the extracted keys.
+
+##### field_split [string]
+
+A string of characters to use as single-character field delimiters for parsing key-value pairs.
+
+##### include_fields [array]
+
+An array specifying the parsed keys which should be added to the event.
+
+##### source_field [string]
+
+Source field.
+
+##### target_field [string]
+
+All extracted fields will be put into `target_field`.
+
+##### value_split [string]
+
+A non-empty string of characters to use as single-character value delimiters for parsing key-value pairs.
+
+### Examples
+
+1. With `target_field`
+
+    ```
+    kv {
+        source_field = "message"
+        target_field = "kv_map"
+        field_split = "&"
+        value_split = "="
+    }
+    ```
+
+    * **Input**
+
+    ```
+    +-----------------+
+    |message         |
+    +-----------------+
+    |name=ricky&age=23|
+    |name=gary&age=28 |
+    +-----------------+
+    ```
+
+    * **Output**
+
+    ```
+    +-----------------+-----------------------------+
+    |message          |kv_map                    |
+    +-----------------+-----------------------------+
+    |name=ricky&age=23|Map(name -> ricky, age -> 23)|
+    |name=gary&age=28 |Map(name -> gary, age -> 28) |
+    +-----------------+-----------------------------+
+    ```
+
+
+2. Without `target_field`
+
+    ```
+    kv {
+        source_field = "message"
+        field_split = "&"
+        value_split = "="
+    }
+    ```
+
+    * **Input**
+
+    ```
+    +-----------------+
+    |message         |
+    +-----------------+
+    |name=ricky&age=23|
+    |name=gary&age=28 |
+    +-----------------+
+    ```
+
+    * **Output**
+
+    ```
+    +-----------------+---+-----+
+    |message         |age|name |
+    +-----------------+---+-----+
+    |name=ricky&age=23|23 |ricky|
+    |name=gary&age=28 |28 |gary |
+    +-----------------+---+-----+
+
+    ```
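+
+3. With `include_fields` and `default_values` (the key names below are only illustrative)
+
+    ```
+    kv {
+        source_field = "message"
+        field_split = "&"
+        value_split = "="
+        include_fields = ["name", "age"]
+        default_values = ["age=0"]
+    }
+    ```
+
+    > Only the parsed keys `name` and `age` are added to the event, and `age` defaults to `0` when it is missing from the input.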
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Lowercase.md b/versioned_docs/version-1.x/configuration/filter-plugins/Lowercase.md
new file mode 100644
index 00000000..5c1b12f6
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Lowercase.md
@@ -0,0 +1,33 @@
+## Filter plugin : Lowercase
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Lowercase specified string field.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | lowercased |
+
+##### source_field [string]
+
+Source field, default is `raw_message`
+
+##### target_field [string]
+
+New field name, default is `lowercased`
+
+### Examples
+
+```
+lowercase {
+    source_field = "address"
+    target_field = "address_lowercased"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Remove.md b/versioned_docs/version-1.x/configuration/filter-plugins/Remove.md
new file mode 100644
index 00000000..32d1aed3
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Remove.md
@@ -0,0 +1,29 @@
+## Filter plugin : Remove
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Remove all specified fields from Rows.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [source_field](#source_field-array) | array | yes | - |
+
+##### source_field [array]
+
+Array of fields needed to be removed.
+
+### Examples
+
+```
+remove {
+    source_field = ["field1", "field2"]
+}
+```
+
+> Remove `field1` and `field2` from Rows.
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Repartition.md b/versioned_docs/version-1.x/configuration/filter-plugins/Repartition.md
new file mode 100644
index 00000000..d141d191
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Repartition.md
@@ -0,0 +1,28 @@
+## Filter plugin : Repartition
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Adjust the number of underlying Spark RDD partitions to increase or decrease the degree of parallelism. This filter is mainly used to tune data processing performance.
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [num_partitions](#num_partitions-number) | number | yes | - |
+
+##### num_partitions [number]
+
+Target partition number.
+
+### Examples
+
+```
+repartition {
+    num_partitions = 8
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Replace.md b/versioned_docs/version-1.x/configuration/filter-plugins/Replace.md
new file mode 100644
index 00000000..5896608e
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Replace.md
@@ -0,0 +1,51 @@
+## Filter plugin : Replace
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Replaces field contents based on regular expression.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [pattern](#pattern-string) | string | yes | - |
+| [replacement](#replacement-string) | string | yes | - |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | replaced |
+
+##### pattern [string]
+
+Regular expression used to match the string, such as `[a-z0-9]`, `\w`, `\d` or `"[a-zA-Z0-9_-]+"`. Please see [Regex Pattern](https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html) for details.
+
+You can also go to [Regex 101](https://regex101.com/) to test your regex interactively.
+
+##### replacement [string]
+
+The replacement string.
+
+##### source_field [string]
+
+Source field, default is `raw_message`.
+
+##### target_field [string]
+
+New field name, default is `replaced`.
+
+### Examples
+
+```
+replace {
+    target_field = "tmp"
+    source_field = "message"
+    pattern = "\w+"
+    replacement = "are"
+}
+```
+
+> Replace **\w+** in `message` with **are** and set it to `tmp` column.
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Sample.md b/versioned_docs/version-1.x/configuration/filter-plugins/Sample.md
new file mode 100644
index 00000000..6d873b07
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Sample.md
@@ -0,0 +1,34 @@
+## Filter plugin : Sample
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Take samples from the events.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [fraction](#fraction-number) | number | no | 0.1 |
+| [limit](#limit-number) | number | no | -1 |
+
+##### fraction [number]
+
+The fraction of sampling. For example, `fraction=0.8` represents to extract `80%` data from the events.
+
+##### limit [number]
+
+The number of Rows after sampling, where `-1` represents no limit.
+
+### Examples
+
+```
+sample {
+    fraction = 0.8
+}
+```
+
+> Extract 80% of events.
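+
+The two options can also be used together; the values below are arbitrary:
+
+```
+sample {
+    fraction = 0.8
+    limit = 1000
+}
+```
+
+> Extract 80% of events and limit the result to 1000 rows after sampling.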
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Split.md b/versioned_docs/version-1.x/configuration/filter-plugins/Split.md
new file mode 100644
index 00000000..56442583
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Split.md
@@ -0,0 +1,63 @@
+## Filter plugin : Split
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Splits a string field using the specified delimiter.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [delimiter](#delimiter-string) | string | no | " "(Space) |
+| [fields](#fields-list) | list | yes | - |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | \_\_root\_\_ |
+
+##### delimiter [string]
+
+The string to split on. Default is a whitespace.
+
+
+##### fields [list]
+
+The corresponding field names of the split fields. The order of field names is important.
+
+If the length of `fields` is greater than the number of split fields, the extra fields will be set to an empty string.
+
+##### source_field [string]
+
+Source field, default is `raw_message`.
+
+##### target_field [string]
+
+New field name, default is `__root__`, and the result of `Split` will be added on the top level of Rows.
+
+If you specify `target_field`, the result of `Split` will be added under `target_field` as a nested field of the Row.
+
+### Examples
+
+```
+split {
+    source_field = "message"
+    delimiter = "&"
+    fields = ["field1", "field2"]
+}
+```
+
+> The string of `message` is split by **&**, and the results are set to `field1` and `field2`.
+
+```
+split {
+    source_field = "message"
+    target_field = "info"
+    delimiter = ","
+    fields = ["field1", "field2"]
+}
+```
+
+> The string of `message` is split by **,**, and the results are set to `info.field1` and `info.field2`
+
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Sql.md b/versioned_docs/version-1.x/configuration/filter-plugins/Sql.md
new file mode 100644
index 00000000..6995483b
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Sql.md
@@ -0,0 +1,54 @@
+## Filter plugin : Sql
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Process Rows using SQL; feel free to use [Spark UDF](http://spark.apache.org/docs/latest/api/sql/).
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [sql](#sql-string) | string | yes | - |
+| [table_name](#table_name-string) | string | yes | - |
+
+##### sql [string]
+
+SQL content.
+
+##### table_name [string]
+
+When `table_name` is set, the current batch of events will be registered as a table named by this setting, on which you can execute SQL.
+
+### Examples
+
+```
+sql {
+    sql = "select username, address from user_info",
+    table_name = "user_info"
+}
+```
+
+> Select the `username` and `address` fields, the remaining fields will be removed.
+
+```
+sql {
+    sql = "select substring(telephone, 0, 10) from user_info",
+    table_name = "user_info"
+}
+```
+
+> Use the [substring function](http://spark.apache.org/docs/latest/api/sql/#substring) to retrieve a substring on the `telephone` field.
+
+
+```
+sql {
+    sql = "select avg(age) from user_info",
+    table_name = "user_info"
+}
+```
+
+> Get the aggregation of the average of `age` using the [avg functions](http://spark.apache.org/docs/latest/api/sql/#avg).
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Table.md b/versioned_docs/version-1.x/configuration/filter-plugins/Table.md
new file mode 100644
index 00000000..67c1c0a2
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Table.md
@@ -0,0 +1,71 @@
+## Filter plugin : Table
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+It is used to map static files into a table, which can be joined with the streams being processed in real time.
+
+It is typically used for looking up user nicknames, provinces and cities, and other dimension data.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [cache](#cache-boolean) | boolean | no | true |
+| [delimiter](#delimiter-string) | string | no | , |
+| [field_types](#field_types-array) | array | no | - |
+| [fields](#fields-array) | array | yes | - |
+| [path](#path-string) | string | yes | - |
+| [table_name](#table_name-string) | string | yes | - |
+
+##### cache [boolean]
+
+Whether to cache file contents in memory. If false, the file will be reloaded every time it is needed.
+
+##### delimiter [string]
+
+The delimiter between columns in the file.
+
+##### field_types [array]
+
+The type of each field, the order and length of `field_types` must correspond to the `fields` parameter. The default type of all columns is string. Supported data types include: `boolean`, `double`, `long`, `string`
+
+##### fields [array]
+
+The names of the columns in each row; they must be provided in the same order as the actual columns in the data.
+
+##### path [string]
+
+
+File path supported by Spark. For example, file:///path/to/file, hdfs:///path/to/file, s3:///path/to/file ...
+
+##### table_name [string]
+
+After the file is loaded, it will be registered as a table with the name specified here, which can then be joined directly with the stream data being processed.
+
+
+### Example
+
+> Without `field_types`
+
+```
+table {
+    table_name = "mydict"
+    path = "/user/seatunnel/mylog/a.txt"
+    fields = ['city', 'population']
+}
+```
+
+> With `field_types`
+
+```
+table {
+    table_name = "mydict"
+    path = "/user/seatunnel/mylog/a.txt"
+    fields = ['city', 'population']
+    field_types = ['string', 'long']
+}
+```
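+
+For the examples above, the static file `/user/seatunnel/mylog/a.txt` would contain one row per line, with columns separated by the default `delimiter` (`,`) in the same order as `fields`. The values below are invented for illustration:
+
+```
+beijing,21893000
+shanghai,24870000
+```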
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Truncate.md b/versioned_docs/version-1.x/configuration/filter-plugins/Truncate.md
new file mode 100644
index 00000000..2cf69d8f
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Truncate.md
@@ -0,0 +1,38 @@
+## Filter plugin : Truncate
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Truncate string.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [max_length](#max_length-number) | number | no | 256 |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | truncated |
+
+##### max_length [number]
+
+Maximum length of the string.
+
+##### source_field [string]
+
+Source field name, default is `raw_message`.
+
+##### target_field [string]
+
+New field name, default is `truncated`.
+
+### Example
+
+```
+truncate {
+    source_field = "telephone"
+    max_length = 10
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Uppercase.md b/versioned_docs/version-1.x/configuration/filter-plugins/Uppercase.md
new file mode 100644
index 00000000..002a390f
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Uppercase.md
@@ -0,0 +1,33 @@
+## Filter plugin : Uppercase
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Uppercase specified field.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [source_field](#source_field-string) | string | no | raw_message |
+| [target_field](#target_field-string) | string | no | uppercased |
+
+##### source_field [string]
+
+Source field, default is `raw_message`
+
+##### target_field [string]
+
+New field name, default is `uppercased`
+
+### Example
+
+```
+uppercase {
+    source_field = "username"
+    target_field = "username_uppercased"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Uuid.md b/versioned_docs/version-1.x/configuration/filter-plugins/Uuid.md
new file mode 100644
index 00000000..409d497b
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Uuid.md
@@ -0,0 +1,28 @@
+## Filter plugin : Uuid
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Uses the Spark function `monotonically_increasing_id()` to add a globally unique, auto-incrementing UUID field.
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [target_field](#target_field-string) | string | no | uuid |
+
+##### target_field [string]
+
+New field name, default is `uuid`.
+
+### Example
+
+```
+uuid {
+    target_field = "id"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/filter-plugins/Watermark.md b/versioned_docs/version-1.x/configuration/filter-plugins/Watermark.md
new file mode 100644
index 00000000..3253d7f8
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/filter-plugins/Watermark.md
@@ -0,0 +1,33 @@
+## Filter plugin : Watermark
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.3.0
+
+### Description
+
+Allows the user to specify a threshold for late data, and allows the engine to clean up old state accordingly.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [event_time](#event_time-string) | string | yes |  |
+| [delay_threshold](#delay_threshold-string) | string | yes |  |
+
+##### event_time [string]
+
+The name of the column that contains the event time of the row.
+
+##### delay_threshold [string]
+
+The minimum delay to wait for late-arriving data, relative to the latest record that has been processed, expressed as an interval (e.g. "1 minute" or "5 hours").
+
+### Example
+
+```
+Watermark {
+    event_time = "datetime"
+    delay_threshold = "5 minutes"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/input-plugin.md b/versioned_docs/version-1.x/configuration/input-plugin.md
new file mode 100644
index 00000000..9eb9ffd8
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugin.md
@@ -0,0 +1,31 @@
+# Input plugin
+
+### Input plugin general parameters
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [result_table_name](#result_table_name-string) | string | yes | - |
+| [table_name](#table_name-string) | string | no | - |
+
+
+##### result_table_name [string]
+
+When `result_table_name` is not specified, the data processed by this plugin will not be registered as a dataset (i.e. a temporary table) that other plugins can access directly;
+
+When `result_table_name` is specified, the data processed by this plugin will be registered as a dataset (i.e. a temporary table) that other plugins can access directly. Other plugins can access the dataset registered here by specifying `source_table_name`.
+
+
+##### table_name [string]
+
+**\[Deprecated from v1.4\]** The function is the same as `result_table_name`; this parameter will be removed in subsequent release versions, so it is recommended to use the `result_table_name` parameter instead.
+
+
+### Usage example
+
+````
+fake {
+    result_table_name = "view_table_2"
+}
+````
+
+> The result of the data source `fake` will be registered as a temporary table named `view_table_2`. This temporary table can be used by any `Filter` or `Output` plugin by specifying `source_table_name`.
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/Alluxio.md b/versioned_docs/version-1.x/configuration/input-plugins/Alluxio.md
new file mode 100644
index 00000000..90ba4e1f
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/Alluxio.md
@@ -0,0 +1,50 @@
+## Input plugin : Alluxio
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.5.0
+
+### Description
+
+Read raw data from Alluxio.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [path](#path-string) | string | yes | - |
+
+##### path [string]
+
+File path on Alluxio cluster.
+
+### Note
+
+If you use Alluxio with ZooKeeper, please add the lines below to start-seatunnel.sh:
+
+```
+driverJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+executorJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+```
+
+Alternatively, since 1.5.0 you can add the lines below to the `spark {}` block of the seatunnel configuration:
+
+```
+spark.driverJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+spark.executorJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+```
+
+### Example
+
+```
+alluxio {
+    path = "alluxio:///access.log"
+}
+```
+
+or you can specify alluxio name service:
+
+```
+alluxio {
+    path = "alluxio://m2:8022/access.log"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/Fake.md b/versioned_docs/version-1.x/configuration/input-plugins/Fake.md
new file mode 100644
index 00000000..b291f23b
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/Fake.md
@@ -0,0 +1,88 @@
+## Input plugin : Fake
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.0
+
+### Description
+
+Input plugin for producing test data.
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [data_format](#data_format-string) | string | no | text |
+| [json_keys](#json_keys-array) | array | no | - |
+| [num_of_fields](#num_of_fields-number) | number | no | 10 |
+| [rate](#rate-number) | number | yes | - |
+| [text_delimeter](#text_delimeter-string) | string | no | , |
+
+##### data_format [string]
+
+The format of test data, supports `text` and `json`
+
+##### json_keys [array]
+
+The list of keys for the JSON data, used when `data_format` is `json`
+
+##### num_of_fields [number]
+
+The number of fields, used when `data_format` is text
+
+
+##### rate [number]
+
+The number of test data produced per second
+
+##### text_delimeter [string]
+
+Text data separator, used when `data_format` is text
+
+### Examples
+
+1. With `data_format`
+
+    ```
+    fake {
+        data_format = "text"
+        text_delimeter = ","
+        num_of_fields = 5
+        rate = 5
+    }
+    ```
+
+* **Input**
+
+    ```
+    +-------------------------------------------------------------------------------------------+
+    |raw_message                                                                                |
+    +-------------------------------------------------------------------------------------------+
+    |Random1-1462437280,Random215896330,Random3-2009195549,Random41027365838,Random51525395111  |
+    |Random1-2135047059,Random2-1030689538,Random3-854912064,Random4126768642,Random5-1483841750|
+    +-------------------------------------------------------------------------------------------+
+    ```
+
+
+2. Without `data_format`
+
+    ```
+    fake {
+        content = ['name=ricky&age=23', 'name=gary&age=28']
+        rate = 5
+    }
+    ```
+
+* **Input**
+
+    ```
+    +-----------------+
+    |raw_message      |
+    +-----------------+
+    |name=gary&age=28 |
+    |name=ricky&age=23|
+    +-----------------+
+    ```
+
+    > Randomly extract a string from the `content` list
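+
+3. With `data_format = "json"` (the keys below are only illustrative)
+
+    ```
+    fake {
+        data_format = "json"
+        json_keys = ["name", "age"]
+        rate = 5
+    }
+    ```
+
+    > Produce JSON test data whose keys are taken from `json_keys`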
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/File.md b/versioned_docs/version-1.x/configuration/input-plugins/File.md
new file mode 100644
index 00000000..0c959773
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/File.md
@@ -0,0 +1,39 @@
+## Input plugin : File
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.1
+
+### Description
+
+Read raw data from local file system.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [format](#format-string) | string | yes | json |
+| [path](#path-string) | string | yes | - |
+| [table_name](#table_name-string) | string | yes | - |
+
+##### format [string]
+
+The input data source format.
+
+##### path [string]
+
+File path.
+
+##### table_name [string]
+
+Registered table name of input data.
+
+### Example
+
+```
+file {
+    path = "file:///var/log/access.log"
+    table_name = "access"
+    format = "text"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/Hdfs.md b/versioned_docs/version-1.x/configuration/input-plugins/Hdfs.md
new file mode 100644
index 00000000..30784365
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/Hdfs.md
@@ -0,0 +1,35 @@
+## Input plugin : Hdfs
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.0
+
+### Description
+
+Read raw data from HDFS.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [path](#path-string) | string | yes | - |
+
+##### path [string]
+
+File path on Hadoop cluster.
+
+### Example
+
+```
+hdfs {
+    path = "hdfs:///access.log"
+}
+```
+
+or you can specify hdfs name service:
+
+```
+hdfs {
+    path = "hdfs://m2:8022/access.log"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/Jdbc.md b/versioned_docs/version-1.x/configuration/input-plugins/Jdbc.md
new file mode 100644
index 00000000..c638c361
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/Jdbc.md
@@ -0,0 +1,64 @@
+## Input plugin : Jdbc
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.2
+
+### Description
+
+Read data from an external data source via JDBC.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [driver](#driver-string) | string | yes | - |
+| [password](#password-string) | string | yes | - |
+| [table](#table-string) | string | yes | - |
+| [table_name](#table_name-string) | string | yes | - |
+| [url](#url-string) | string | yes | - |
+| [user](#user-string) | string | yes | - |
+
+##### driver [string]
+
+Class name of jdbc driver.
+
+##### password [string]
+
+Password.
+
+
+##### table [string]
+
+Table name.
+
+
+##### table_name [string]
+
+Registered table name of input data.
+
+
+##### url [string]
+
+The url of JDBC. For example: `jdbc:postgresql://localhost/test`
+
+
+##### user [string]
+
+Username.
+
+
+### Example
+
+```
+jdbc {
+    driver = "com.mysql.jdbc.Driver"
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "access"
+    table_name = "access_log"
+    user = "username"
+    password = "password"
+}
+```
+
+> Read data from MySQL with jdbc.
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/KafkaStream.md b/versioned_docs/version-1.x/configuration/input-plugins/KafkaStream.md
new file mode 100644
index 00000000..d2e554c5
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/KafkaStream.md
@@ -0,0 +1,65 @@
+## Input plugin : KafkaStream
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.0
+
+### Description
+
+Read events from one or more Kafka topics. Supports Kafka >= 0.10.0.
+
+
+### Options
+
+| name | type | required | default value | engine |
+| --- | --- | --- | --- | --- |
+| [topics](#topics-string) | string | yes | - | all streaming |
+| [consumer.group.id](#consumergroupid-string) | string | yes | - | spark streaming |
+| [consumer.bootstrap.servers](#consumerbootstrapservers-string) | string | yes | - | all streaming |
+| [consumer.*](#consumer-string) | string | no | - | all streaming |
+
+##### topics [string]
+
+Kafka topic. Multiple topics separated by commas. For example, "tpc1,tpc2".
+
+##### consumer.group.id [string]
+
+Kafka consumer group id, a unique string that identifies the consumer group this consumer belongs to. Only works on Spark Streaming application.
+
+##### consumer.bootstrap.servers [string]
+
+A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. This string should be in the form `host1:port1,host2:port2,...`.
+
+##### consumer [string]
+
+In addition to the above parameters that must be specified for the consumer client, you can also specify additional Kafka consumer parameters described in [consumerconfigs](http://kafka.apache.org/10/documentation.html#consumerconfigs).
+
+For the optional Spark Structured Streaming configurations, refer to the [Structured Streaming + Kafka Integration Guide](https://spark.apache.org/docs/latest/structured-streaming-kafka-integration.html#reading-data-from-kafka).
+
+The way to specify a parameter is to add the prefix "consumer" before the parameter name. For example, `rebalance.max.retries` is specified as `consumer.rebalance.max.retries = 100`. If you do not specify these parameters, the default values from the Kafka documentation are used.
+
+
+### Examples
+
+* Spark Streaming
+
+```
+kafkaStream {
+    topics = "seatunnel"
+    consumer.bootstrap.servers = "localhost:9092"
+    consumer.group.id = "seatunnel_group"
+    consumer.rebalance.max.retries = 100
+}
+```
+
+* Spark Structured Streaming
+
+```
+kafkaStream {
+    topics = "seatunnel"
+    consumer.bootstrap.servers = "localhost:9092"
+    consumer.group.id = "seatunnel_group"
+    consumer.rebalance.max.retries = 100
+    consumer.failOnDataLoss = false
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/MySQL.md b/versioned_docs/version-1.x/configuration/input-plugins/MySQL.md
new file mode 100644
index 00000000..d9310d02
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/MySQL.md
@@ -0,0 +1,58 @@
+## Input plugin : Mysql
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.2
+
+### Description
+
+Read data from MySQL.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [password](#password-string) | string | yes | - |
+| [table](#table-string) | string | yes | - |
+| [table_name](#table_name-string) | string | yes | - |
+| [url](#url-string) | string | yes | - |
+| [user](#user-string) | string | yes | - |
+
+
+##### password [string]
+
+Password.
+
+##### table [string]
+
+Table name.
+
+
+##### table_name [string]
+
+Registered table name of input data.
+
+
+##### url [string]
+
+The url of JDBC. For example: `jdbc:mysql://localhost:3306/info`
+
+
+##### user [string]
+
+Username.
+
+
+### Example
+
+```
+mysql {
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "access"
+    table_name = "access_log"
+    user = "username"
+    password = "password"
+}
+```
+
+> Read data from MySQL.
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/RedisStream.md b/versioned_docs/version-1.x/configuration/input-plugins/RedisStream.md
new file mode 100644
index 00000000..8b44f5ed
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/RedisStream.md
@@ -0,0 +1,75 @@
+## Input plugin : RedisStream [Streaming]
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.0
+
+### Description
+
+Read data from Redis.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [host](#host-string) | string | yes | - |
+| [prefKey](#prefKey-string) | string | yes | - |
+| [queue](#queue-string) | string | yes | - |
+| [password](#password-string) | string | no | - |
+| [maxTotal](#maxTotal-number) | number | no | 200 |
+| [maxIdle](#maxIdle-number) | number | no | 200 |
+| [maxWaitMillis](#maxWaitMillis-number) | number | no | 2000 |
+| [connectionTimeout](#connectionTimeout-number) | number | no | 5000 |
+| [soTimeout](#soTimeout-number) | number | no | 5000 |
+| [maxAttempts](#maxAttempts-number) | number | no | 5 |
+
+##### host [string]
+
+Redis cluster server hosts, in the form `host1:port1,host2:port2,...`.
+
+##### prefKey [string]
+
+Redis key prefix. Keys are composed as `prefKey + ':' + key`.
+
+##### queue [string]
+
+Redis queue name; the data is stored in this queue.
+
+##### password [string]
+
+Redis password.
+
+##### maxTotal [number]
+
+Redis connection pool `maxTotal` setting.
+
+##### maxIdle [number]
+
+Redis connection pool `maxIdle` setting.
+
+##### maxWaitMillis [number]
+
+Redis connection pool `maxWaitMillis` setting.
+
+##### connectionTimeout [number]
+
+Redis `connectionTimeout` setting.
+
+##### soTimeout [number]
+
+Redis `soTimeout` setting.
+
+##### maxAttempts [number]
+
+Redis `maxAttempts` setting.
+
+### Example
+
+```
+RedisStream {
+    host = "127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002"
+    prefKey = "api"
+    queue = "test"
+    password = "root"
+}
+```
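+
+If needed, the optional connection pool parameters listed above can be tuned in the same block. A sketch with the documented defaults made explicit (the values below simply restate the defaults from the options table):
+
+```
+RedisStream {
+    host = "127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002"
+    prefKey = "api"
+    queue = "test"
+    password = "root"
+    maxTotal = 200
+    maxIdle = 200
+    maxWaitMillis = 2000
+    connectionTimeout = 5000
+    soTimeout = 5000
+    maxAttempts = 5
+}
+```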
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/S3.md b/versioned_docs/version-1.x/configuration/input-plugins/S3.md
new file mode 100644
index 00000000..e125701a
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/S3.md
@@ -0,0 +1,27 @@
+## Input plugin : S3
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.0
+
+### Description
+
+Read raw data from AWS S3 storage.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [path](#path-string) | string | yes | - |
+
+##### path [string]
+
+File path on S3; supported path schemes are **s3://**, **s3a://** and **s3n://**.
+
+### Example
+
+```
+hdfs {
+    path = "s3n://bucket/access.log"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/input-plugins/Socket.md b/versioned_docs/version-1.x/configuration/input-plugins/Socket.md
new file mode 100644
index 00000000..863f6573
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/input-plugins/Socket.md
@@ -0,0 +1,24 @@
+## Input plugin : Socket
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.0
+
+### Description
+
+Read data over a TCP socket
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [host](#host-string) | string | no | localhost |
+| [port](#port-number) | number | no | 9999 |
+
+##### host [string]
+
+Socket server hostname
+
+##### port [number]
+
+Socket server port
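+
+### Example
+
+A minimal configuration sketch; the `socketStream` block name follows the usage shown in the Quick start guide, and the values below are the documented defaults:
+
+```
+socketStream {
+    host = "localhost"
+    port = 9999
+}
+```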
diff --git a/versioned_docs/version-1.x/configuration/output-plugin.md b/versioned_docs/version-1.x/configuration/output-plugin.md
new file mode 100644
index 00000000..ae14dea7
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugin.md
@@ -0,0 +1,32 @@
+# Output plugin
+
+### Output plugin common parameters
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [source_table_name](#source_table_name-string) | string | no | - |
+
+
+
+##### source_table_name [string]
+
+When `source_table_name` is not specified, the current plugin processes the dataset output by the previous plugin in the configuration file;
+
+When `source_table_name` is specified, the current plugin processes the dataset corresponding to this parameter.
+
+
+### Usage example
+
+````
+stdout {
+     source_table_name = "view_table_2"
+}
+````
+
+> Outputs the dataset registered as the temporary table `view_table_2`.
+
+````
+stdout {}
+````
+
+> If `source_table_name` is not configured, the processing result of the last `Filter` plugin in the configuration file is output.
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/Alluxio.md b/versioned_docs/version-1.x/configuration/output-plugins/Alluxio.md
new file mode 100644
index 00000000..4eddc34b
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/Alluxio.md
@@ -0,0 +1,81 @@
+## Output plugin : Alluxio
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.5.0
+
+### Description
+
+Write Rows to Alluxio.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [options](#options-object) | object | no | - |
+| [partition_by](#partition_by-array) | array | no | - |
+| [path](#path-string) | string | yes | - |
+| [path_time_format](#path_time_format-string) | string | no | yyyyMMddHHmmss |
+| [save_mode](#save_mode-string) | string | no | error |
+| [format](#format-string) | string | no | json |
+
+##### options [object]
+
+Custom parameters.
+
+##### partition_by [array]
+
+Partition the data based on the fields.
+
+##### path [string]
+
+File path on Alluxio. Start with `alluxio://`.
+
+##### path_time_format [string]
+
+If `path` contains time variables, such as `xxxx-${now}`, `path_time_format` can be used to specify the format of Alluxio path, default is `yyyy.MM.dd`. The commonly used time formats are listed below:
+
+
+| Symbol | Description |
+| --- | --- |
+| y | Year |
+| M | Month |
+| d | Day of month |
+| H | Hour in day (0-23) |
+| m | Minute in hour |
+| s | Second in minute |
+
+The detailed time format syntax: [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html).
+
+##### save_mode [string]
+
+Save mode, supports `overwrite`, `append`, `ignore` and `error`. For details of save_mode, see [save-modes](http://spark.apache.org/docs/2.2.0/sql-programming-guide.html#save-modes).
+
+##### format [string]
+
+Output format, supports `csv`, `json`, `parquet` and `text`.
+
+### Note
+
+If you use Alluxio with ZooKeeper, please add the following to start-seatunnel.sh:
+
+```
+driverJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+executorJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+```
+
+Alternatively, since version 1.5.0 you can add the following to the `spark {}` block of the seatunnel configuration:
+
+```
+spark.driverJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+spark.executorJavaOpts="-Dalluxio.user.file.writetype.default=CACHE_THROUGH -Dalluxio.zookeeper.address=your.zookeeper.address:zookeeper.port -Dalluxio.zookeeper.enabled=true"
+```
+
+### Example
+
+```
+alluxio {
+    path = "alluxio:///var/logs-${now}"
+    format = "json"
+    path_time_format = "yyyy.MM.dd"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/Clickhouse.md b/versioned_docs/version-1.x/configuration/output-plugins/Clickhouse.md
new file mode 100644
index 00000000..3027f83f
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/Clickhouse.md
@@ -0,0 +1,110 @@
+## Output plugin : Clickhouse
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.1.0
+
+### Description
+
+Write Rows to ClickHouse via [Clickhouse-jdbc](https://github.com/yandex/clickhouse-jdbc). You need to create the corresponding table in advance.
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [bulk_size](#bulk_size-number) | number | no | 20000 |
+| [clickhouse.*](#clickhouse-string) | string | no | - |
+| [cluster](#cluster-string) | string | no | - |
+| [database](#database-string) | string | yes | - |
+| [fields](#fields-list) | list | yes | - |
+| [host](#host-string) | string | yes | - |
+| [password](#password-string) | string | no | - |
+| [table](#table-string) | string | yes | - |
+| [username](#username-string) | string | no | - |
+
+##### bulk_size [number]
+
+The number of Rows written to ClickHouse through [ClickHouse JDBC](https://github.com/yandex/clickhouse-jdbc). Default is 20000.
+
+##### database [string]
+
+ClickHouse database.
+
+##### fields [list]
+
+List of fields that need to be written to ClickHouse.
+
+##### host [string]
+
+ClickHouse hosts, format as `hostname:port`
+
+##### cluster [string]
+
+ClickHouse cluster name which the table belongs to, see [Distributed](https://clickhouse.tech/docs/en/operations/table_engines/distributed/)
+
+##### password [string]
+
+ClickHouse password, only used when ClickHouse has authority authentication.
+
+##### table [string]
+
+ClickHouse table name.
+
+##### username [string]
+
+ClickHouse username, only used when ClickHouse has authority authentication.
+
+##### clickhouse [string]
+
+In addition to the above parameters that must be specified for the ClickHouse JDBC client, you can also specify additional parameters described in [clickhouse-jdbc settings](https://github.com/yandex/clickhouse-jdbc/blob/master/src/main/java/ru/yandex/clickhouse/settings/ClickHouseProperties.java).
+
+The way to specify a parameter is to add the prefix "clickhouse" before the parameter name. For example, `socket_timeout` is specified as `clickhouse.socket_timeout = 50000`. If you do not specify these parameters, the default values from clickhouse-jdbc are used.
+
+
+### ClickHouse Data Type Check List
+
+
+| ClickHouse Data Type | Convert Plugin Target Type | SQL Expression | Description |
+| :---: | :---: | :---: | :---: |
+| Date | string | string() | Format of `yyyy-MM-dd` |
+| DateTime | string | string() | Format of `yyyy-MM-dd HH:mm:ss` |
+| String | string | string() | |
+| Int8 | integer | int() | |
+| UInt8 | integer | int() | |
+| Int16 | integer | int() | |
+| UInt16 | integer | int() | |
+| Int32 | integer | int() | |
+| UInt32 | long | bigint() | |
+| Int64 | long | bigint() | |
+| UInt64 | long | bigint() | |
+| Float32 | float | float() | |
+| Float64 | double | double() | |
+| Array(T) | - | - | |
+| Nullable(T) | depends on T | depends on T | |
+
+### Examples
+
+```
+clickhouse {
+    host = "localhost:8123"
+    clickhouse.socket_timeout = 50000
+    database = "nginx"
+    table = "access_msg"
+    fields = ["date", "datetime", "hostname", "http_code", "data_size", "ua", "request_time"]
+    username = "username"
+    password = "password"
+    bulk_size = 20000
+}
+```
+
+#### Distributed table config
+```
+ClickHouse {
+    host = "localhost:8123"
+    database = "nginx"
+    table = "access_msg"
+    cluster = "no_replica_cluster"
+    fields = ["date", "datetime", "hostname", "http_code", "data_size", "ua", "request_time"]
+}
+```
+> Queries the `system.clusters` table to find out which physical shard nodes store the table. A single Spark partition only writes to a certain ClickHouse node, chosen using a random policy.
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/Elasticsearch.md b/versioned_docs/version-1.x/configuration/output-plugins/Elasticsearch.md
new file mode 100644
index 00000000..5f76c995
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/Elasticsearch.md
@@ -0,0 +1,77 @@
+## Output plugin : Elasticsearch
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Write Rows to Elasticsearch. Supports Elasticsearch >= 2.x.
+
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [hosts](#hosts-array) | array | yes | - |
+| [index_type](#index_type-string) | string | no | _doc |
+| [index_time_format](#index_time_format-string) | string | no | yyyy.MM.dd |
+| [index](#index-string) | string | no | seatunnel |
+| [es](#es-string) | string | no | - |
+
+##### hosts [array]
+
+Elasticsearch hosts, format as `host:port`. For example, `["host1:9200", "host2:9200"]`
+
+##### index_type [string]
+
+Elasticsearch index type
+
+##### index_time_format [string]
+
+Elasticsearch time format. If `index` is like `xxxx-${now}`, `index_time_format` can be used to specify the time format of the index; the default is `yyyy.MM.dd`. The commonly used time formats are listed below:
+
+| Symbol | Description |
+| --- | --- |
+| y | Year |
+| M | Month |
+| d | Day of month |
+| H | Hour in day (0-23) |
+| m | Minute in hour |
+| s | Second in minute |
+
+The detailed time format syntax: [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html).
+
+##### index [string]
+
+Elasticsearch index name, if you want to generate index based on time, you need to specify the field like `seatunnel-${now}`. `now` means current time.
+
+
+##### es.* [string]
+
+You can also specify additional Elasticsearch parameters described in [Elasticsearch Configuration](https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html#cfg-mapping).
+
+The way to specify a parameter is to add the prefix "es" before the parameter name. For example, `batch.size.entries` is specified as `es.batch.size.entries = 1000`. If you do not specify these parameters, the default values from the Elasticsearch documentation are used.
+
+
+### Examples
+
+```
+elasticsearch {
+    hosts = ["localhost:9200"]
+    index = "seatunnel"
+}
+```
+
+> Index name is `seatunnel`
+
+```
+elasticsearch {
+    hosts = ["localhost:9200"]
+    index = "seatunnel-${now}"
+    es.batch.size.entries = 100000
+    index_time_format = "yyyy.MM.dd"
+}
+```
+
+> Create index by day. For example: **seatunnel-2017.11.03**
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/File.md b/versioned_docs/version-1.x/configuration/output-plugins/File.md
new file mode 100644
index 00000000..c87e4a9e
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/File.md
@@ -0,0 +1,66 @@
+## Output plugin : File
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Write Rows to local file system.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [options](#options-object) | object | no | - |
+| [partition_by](#partition_by-array) | array | no | - |
+| [path](#path-string) | string | yes | - |
+| [path_time_format](#path_time_format-string) | string | no | yyyyMMddHHmmss |
+| [save_mode](#save_mode-string) | string | no | error |
+| [format](#format-string) | string | no | json |
+
+##### options [object]
+
+Custom parameters.
+
+##### partition_by [array]
+
+Partition the data based on the fields.
+
+##### path [string]
+
+Output File path. Start with `file://`.
+
+##### path_time_format [string]
+
+If `path` contains time variable, such as `xxxx-${now}`, `path_time_format` can be used to specify the format of path, default is `yyyy.MM.dd`. The commonly used time formats are listed below:
+
+
+| Symbol | Description |
+| --- | --- |
+| y | Year |
+| M | Month |
+| d | Day of month |
+| H | Hour in day (0-23) |
+| m | Minute in hour |
+| s | Second in minute |
+
+The detailed time format syntax: [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html).
+
+##### save_mode [string]
+
+Save mode, supports `overwrite`, `append`, `ignore` and `error`. For details of save_mode, see [save-modes](http://spark.apache.org/docs/2.2.0/sql-programming-guide.html#save-modes).
+
+##### format [string]
+
+Output format, supports `csv`, `json`, `parquet` and `text`.
+
+
+### Example
+
+```
+file {
+    path = "file:///var/logs"
+    format = "text"
+}
+```
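+
+If `path` contains a time variable, the options described above can be combined. A sketch only; the directory layout and the `status` partition field are illustrative placeholders:
+
+```
+file {
+    path = "file:///var/logs-${now}"
+    path_time_format = "yyyy.MM.dd"
+    partition_by = ["status"]
+    save_mode = "append"
+    format = "json"
+}
+```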
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/Hdfs.md b/versioned_docs/version-1.x/configuration/output-plugins/Hdfs.md
new file mode 100644
index 00000000..a40a5f70
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/Hdfs.md
@@ -0,0 +1,67 @@
+## Output plugin : Hdfs
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Write Rows to HDFS.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [options](#options-object) | object | no | - |
+| [partition_by](#partition_by-array) | array | no | - |
+| [path](#path-string) | string | yes | - |
+| [path_time_format](#path_time_format-string) | string | no | yyyyMMddHHmmss |
+| [save_mode](#save_mode-string) | string | no | error |
+| [format](#format-string) | string | no | json |
+
+##### options [object]
+
+Custom parameters.
+
+##### partition_by [array]
+
+Partition the data based on the fields.
+
+##### path [string]
+
+File path on HDFS. Start with `hdfs://`.
+
+##### path_time_format [string]
+
+If `path` contains time variables, such as `xxxx-${now}`, `path_time_format` can be used to specify the format of HDFS path, default is `yyyy.MM.dd`. The commonly used time formats are listed below:
+
+
+| Symbol | Description |
+| --- | --- |
+| y | Year |
+| M | Month |
+| d | Day of month |
+| H | Hour in day (0-23) |
+| m | Minute in hour |
+| s | Second in minute |
+
+The detailed time format syntax: [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html).
+
+##### save_mode [string]
+
+Save mode, supports `overwrite`, `append`, `ignore` and `error`. For details of save_mode, see [save-modes](http://spark.apache.org/docs/2.2.0/sql-programming-guide.html#save-modes).
+
+##### format [string]
+
+Output format, supports `csv`, `json`, `parquet` and `text`.
+
+
+### Example
+
+```
+hdfs {
+    path = "hdfs:///var/logs-${now}"
+    format = "json"
+    path_time_format = "yyyy.MM.dd"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/Jdbc.md b/versioned_docs/version-1.x/configuration/output-plugins/Jdbc.md
new file mode 100644
index 00000000..db0b3a38
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/Jdbc.md
@@ -0,0 +1,61 @@
+## Output plugin : Jdbc
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Write Rows to an external data source via JDBC.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [driver](#driver-string) | string | yes | - |
+| [password](#password-string) | string | yes | - |
+| [save_mode](#save_mode-string) | string | no | append |
+| [table](#table-string) | string | yes | - |
+| [url](#url-string) | string | yes | - |
+| [user](#user-string) | string | yes | - |
+
+##### driver [string]
+
+Class name of jdbc driver.
+
+##### password [string]
+
+Password.
+
+##### save_mode [string]
+
+Save mode, supports `overwrite`, `append`, `ignore` and `error`. For details of save_mode, see [save-modes](http://spark.apache.org/docs/2.2.0/sql-programming-guide.html#save-modes).
+
+##### table [string]
+
+Table name.
+
+##### url [string]
+
+The url of JDBC. For example: `jdbc:postgresql://localhost/test`
+
+
+##### user [string]
+
+Username.
+
+
+### Example
+
+```
+jdbc {
+    driver = "com.mysql.jdbc.Driver"
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "access"
+    user = "username"
+    password = "password"
+    save_mode = "append"
+}
+```
+
+> Write data to MySQL with the JDBC output.
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/Kafka.md b/versioned_docs/version-1.x/configuration/output-plugins/Kafka.md
new file mode 100644
index 00000000..576260ea
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/Kafka.md
@@ -0,0 +1,41 @@
+## Output plugin : Kafka
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Write Rows to a Kafka topic.
+
+### Options
+
+| name | type | required | default value | engine |
+| --- | --- | --- | --- | --- |
+| [producer.bootstrap.servers](#producerbootstrapservers-string) | string | yes | - | all streaming |
+| [topic](#topic-string) | string | yes | - | all streaming |
+| [producer.*](#producer-string) | string | no | - | all streaming |
+
+##### producer.bootstrap.servers [string]
+
+Kafka Brokers List
+
+##### topic [string]
+
+Kafka Topic
+
+##### producer [string]
+
+In addition to the above parameters that must be specified for the producer client, you can also specify additional Kafka producer parameters described in [producerconfigs](http://kafka.apache.org/10/documentation.html#producerconfigs).
+
+The way to specify a parameter is to add the prefix "producer" before the parameter name. For example, `request.timeout.ms` is specified as `producer.request.timeout.ms = 60000`. If you do not specify these parameters, the default values from the Kafka documentation are used.
+
+
+### Examples
+
+```
+kafka {
+    topic = "seatunnel"
+    producer.bootstrap.servers = "localhost:9092"
+}
+```
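+
+Additional producer options use the `producer.` prefix described above; the sketch below simply applies the `producer.request.timeout.ms` example from the text:
+
+```
+kafka {
+    topic = "seatunnel"
+    producer.bootstrap.servers = "localhost:9092"
+    producer.request.timeout.ms = 60000
+}
+```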
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/MySQL.md b/versioned_docs/version-1.x/configuration/output-plugins/MySQL.md
new file mode 100644
index 00000000..f4344af4
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/MySQL.md
@@ -0,0 +1,54 @@
+## Output plugin : Mysql
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Write Rows to MySQL.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [password](#password-string) | string | yes | - |
+| [save_mode](#save_mode-string) | string | no | append |
+| [table](#table-string) | string | yes | - |
+| [url](#url-string) | string | yes | - |
+| [user](#user-string) | string | yes | - |
+
+
+##### password [string]
+
+Password.
+
+##### save_mode [string]
+
+Save mode, supports `overwrite`, `append`, `ignore` and `error`. For details of save_mode, see [save-modes](http://spark.apache.org/docs/2.2.0/sql-programming-guide.html#save-modes).
+
+##### table [string]
+
+Table name.
+
+##### url [string]
+
+The url of JDBC. For example: `jdbc:mysql://localhost:3306/info`
+
+
+##### user [string]
+
+Username.
+
+
+### Example
+
+```
+mysql {
+    url = "jdbc:mysql://localhost:3306/info"
+    table = "access"
+    user = "username"
+    password = "password"
+    save_mode = "append"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/S3.md b/versioned_docs/version-1.x/configuration/output-plugins/S3.md
new file mode 100644
index 00000000..b90c9c60
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/S3.md
@@ -0,0 +1,66 @@
+## Output plugin : S3
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Write Rows to AWS S3 storage.
+
+### Options
+
+| name | type | required | default value |
+| --- | --- | --- | --- |
+| [options](#options-object) | object | no | - |
+| [partition_by](#partition_by-array) | array | no | - |
+| [path](#path-string) | string | yes | - |
+| [path_time_format](#path_time_format-string) | string | no | yyyyMMddHHmmss |
+| [save_mode](#save_mode-string) | string | no | error |
+| [format](#format-string) | string | no | json |
+
+##### options [object]
+
+Custom parameters.
+
+##### partition_by [array]
+
+Partition the data based on the fields.
+
+##### path [string]
+
+File path on AWS S3 storage. Start with `s3://`, `s3a://` or `s3n://`.
+
+##### path_time_format [string]
+
+If `path` contains time variables, such as `xxxx-${now}`, `path_time_format` can be used to specify the format of s3 path, default is `yyyy.MM.dd`. The commonly used time formats are listed below:
+
+
+| Symbol | Description |
+| --- | --- |
+| y | Year |
+| M | Month |
+| d | Day of month |
+| H | Hour in day (0-23) |
+| m | Minute in hour |
+| s | Second in minute |
+
+The detailed time format syntax: [Java SimpleDateFormat](https://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html).
+
+##### save_mode [string]
+
+Save mode, supports `overwrite`, `append`, `ignore` and `error`. For details of save_mode, see [save-modes](http://spark.apache.org/docs/2.2.0/sql-programming-guide.html#save-modes).
+
+##### format [string]
+
+Output format, supports `csv`, `json`, `parquet` and `text`.
+
+
+### Example
+
+```
+s3 {
+    path = "s3a://var/logs"
+    format = "parquet"
+}
+```
diff --git a/versioned_docs/version-1.x/configuration/output-plugins/Stdout.md b/versioned_docs/version-1.x/configuration/output-plugins/Stdout.md
new file mode 100644
index 00000000..86510d8d
--- /dev/null
+++ b/versioned_docs/version-1.x/configuration/output-plugins/Stdout.md
@@ -0,0 +1,33 @@
+## Output plugin : Stdout
+
+* Author: InterestingLab
+* Homepage: https://interestinglab.github.io/seatunnel-docs
+* Version: 1.0.0
+
+### Description
+
+Output Rows to the console; it is commonly used for debugging.
+
+### Options
+
+| name | type | required | default value | engine |
+| --- | --- | --- | --- | --- |
+| [limit](#limit-number) | number | no | 100 | batch/spark streaming |
+| [format](#format-string) | string | no | plain | batch/spark streaming |
+
+##### limit [number]
+
+Limit the number of output rows. `-1` means no limit.
+
+##### format [string]
+
+The format used for output; the allowed formats are `json`, `plain` and `schema`.
+
+### Example
+
+```
+stdout {
+    limit = 10
+    format = "json"
+}
+```
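+
+A variant using the other documented values; this is only a sketch based on the option descriptions above, where `-1` disables the limit and `schema` is one of the allowed formats:
+
+```
+stdout {
+    limit = -1
+    format = "schema"
+}
+```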
diff --git a/versioned_docs/version-1.x/deployment.md b/versioned_docs/version-1.x/deployment.md
new file mode 100644
index 00000000..d0789019
--- /dev/null
+++ b/versioned_docs/version-1.x/deployment.md
@@ -0,0 +1,58 @@
+# Deploy and run
+
+> Seatunnel depends on the Java runtime environment and Spark. For detailed seatunnel installation steps, please refer to [Installing seatunnel](/zh-cn/v1/installation)
+
+The following shows how to run seatunnel on the different platforms:
+
+### Run seatunnel in local mode
+
+````
+./bin/start-seatunnel.sh --master local[4] --deploy-mode client --config ./config/application.conf
+````
+
+### Running seatunnel on Spark Standalone cluster
+
+````
+# client mode
+./bin/start-seatunnel.sh --master spark://207.184.161.138:7077 --deploy-mode client --config ./config/application.conf
+
+# cluster mode
+./bin/start-seatunnel.sh --master spark://207.184.161.138:7077 --deploy-mode cluster --config ./config/application.conf
+````
+
+### Running seatunnel on Yarn cluster
+
+````
+# client mode
+./bin/start-seatunnel.sh --master yarn --deploy-mode client --config ./config/application.conf
+
+# cluster mode
+./bin/start-seatunnel.sh --master yarn --deploy-mode cluster --config ./config/application.conf
+````
+
+### Running seatunnel on Mesos
+
+````
+# cluster mode
+./bin/start-seatunnel.sh --master mesos://207.184.161.138:7077 --deploy-mode cluster --config ./config/application.conf
+````
+
+---
+
+The `master` and `deploy-mode` parameters of start-seatunnel.sh have the same meaning as Spark's `master` and `deploy-mode`.
+Reference: [Spark Submitting Applications](http://spark.apache.org/docs/latest/submitting-applications.html)
+
+If you want to specify the resources used by seatunnel at runtime, or other Spark parameters, you can specify them in the configuration file passed via `--config`:
+
+````
+spark {
+  spark.executor.instances = 2
+  spark.executor.cores = 1
+  spark.executor.memory = "1g"
+  ...
+}
+...
+
+````
+
+For how to configure seatunnel, see [seatunnel configuration](/zh-cn/v1/configuration/base)
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/developing-plugin.md b/versioned_docs/version-1.x/developing-plugin.md
new file mode 100644
index 00000000..7e48c83c
--- /dev/null
+++ b/versioned_docs/version-1.x/developing-plugin.md
@@ -0,0 +1,309 @@
+# Plugin development
+
+
+## Introduction to plugin system
+
+The seatunnel plugin system is divided into three parts: **Input**, **Filter** and **Output**.
+
+### Input
+
+**Input** is responsible for converting the data of the external data source into `DStream[(String, String)]`
+
+### Filter
+
+**Filter** is a [transform](http://spark.apache.org/docs/latest/rdd-programming-guide.html#transformations) operation, responsible for operating on the data structure of Dataset[Row]
+
+### Output
+
+**Output** is the [action](http://spark.apache.org/docs/latest/rdd-programming-guide.html#actions) operation, which is responsible for outputting the Dataset[Row] to an external data source or printing it to terminal
+
+## Preparation
+
+seatunnel supports Java and Scala as plugin development languages. Scala is recommended for **Input** plugins; either Java or Scala can be used for the other plugin types.
+
+Create a new Java/Scala project, or directly pull [seatunnel-filter-example](https://github.com/InterestingLab/seatunnel-filter-example) and modify it.
+
+## 1. Create a new pom.xml
+
+Reference file [pom.xml](https://github.com/InterestingLab/seatunnel-filter-example/blob/master/pom.xml)
+
+Add the interfaces provided by seatunnel to the project's dependencies:
+````
+<dependency>
+    <groupId>io.github.interestinglab.seatunnel</groupId>
+    <artifactId>seatunnel-apis_2.11</artifactId>
+    <version>1.1.0</version>
+</dependency>
+````
+
+## 2. Implement your own method
+
+### Input (live stream)
+
+- Create a new class and inherit the parent class `BaseStreamingInput` provided by **seatunnel-apis**
+    ```scala
+    class ScalaHdfs extends BaseStreamingInput {
+    
+      var config: Config = ConfigFactory.empty()
+    
+      /**
+        * SetConfig.
+        **/
+      override def setConfig(config: Config): Unit = {
+        this.config = config
+      }
+    
+      /**
+        * GetConfig.
+        **/
+      override def getConfig(): Config = {
+        this.config
+      }
+    }
+    ````
+- Override the `checkConfig`, `prepare` and `getDStream` methods defined by the parent class
+    ```scala
+    override def checkConfig(): (Boolean, String) = {}
+    override def prepare(spark: SparkSession): Unit = {}
+    override def getDStream(ssc: StreamingContext): DStream[(String, String)] = {}
+  
+    ````
+- When the **Input** plugin is called, it first executes the `checkConfig` method to check whether the parameters passed to the plugin are correct, then calls the `prepare` method to set the default parameter values and initialize the member variables of the class, and finally calls the `getDStream` method to convert the external data source into a `DStream[(String, String)]`.
+- For the Scala implementation of the **Input** plugin, refer to [ScalaHdfs](https://github.com/InterestingLab/seatunnel-filter-example/blob/master/src/main/scala/org/interestinglab/seatunnel/input/ScalaHdfs.scala).
+
+
+### Filter
+
+- Create a new class and inherit the parent class `BaseFilter` provided by **seatunnel-apis**
+    ```Scala
+    class ScalaSubstring extends BaseFilter {
+    
+      var config: Config = ConfigFactory.empty()
+    
+      /**
+        * SetConfig.
+        **/
+      override def setConfig(config: Config): Unit = {
+        this.config = config
+      }
+    
+      /**
+        * GetConfig.
+        **/
+      override def getConfig(): Config = {
+        this.config
+      }
+    }
+    ````
+    ````Java
+    public class JavaSubstring extends BaseFilter {
+    
+        private Config config;
+    
+        @Override
+        public Config getConfig() {
+            return config;
+        }
+    
+        @Override
+        public void setConfig(Config config) {
+            this.config = config;
+        }
+    }
+    ````
+- Override the `checkConfig`, `prepare` and `process` methods defined by the parent class
+    ```Scala
+    override def checkConfig(): (Boolean, String) = {}
+    override def prepare(spark: SparkSession): Unit = {}
+    override def process(spark: SparkSession, ds: Dataset[Row]): Dataset[Row] = {}
+    ````
+    ````Java
+    @Override
+    public Tuple2<Object, String> checkConfig() {}
+    @Override
+    public void prepare(SparkSession spark, StreamingContext ssc) {}
+    @Override
+    public Dataset<Row> process(SparkSession spark, Dataset<Row> df) {}
+    ````
+    - When the **Filter** plugin is called, it first executes the `checkConfig` method to check whether the parameters passed to the plugin are correct, then calls the `prepare` method to set the default parameter values and initialize the member variables of the class, and finally calls the `process` method to process the data in **Dataset[Row]** format.
+    - For the Java implementation of the **Filter** plugin, refer to [JavaSubstring](https://github.com/InterestingLab/seatunnel-filter-example/blob/master/src/main/java/org/interestinglab/seatunnel/filter/JavaSubstring.java); for the Scala implementation, refer to [ScalaSubstring](https://github.com/InterestingLab/seatunnel-filter-example/blob/master/src/main/scala/org/interestinglab/seatunnel/filter/ScalaSubstring.scala).
+
+### Output
+
+- Create a new class and inherit the parent class `BaseOutput` provided by **seatunnel-apis**
+    ```Scala
+    class ScalaStdout extends BaseOutput {
+    
+    
+      var config: Config = ConfigFactory.empty()
+    
+      /**
+        * SetConfig.
+        **/
+      override def setConfig(config: Config): Unit = {
+        this.config = config
+      }
+    
+      /**
+        * GetConfig.
+        **/
+      override def getConfig(): Config = {
+        this.config
+      }
+    }
+    ````
+    ````Java
+    public class JavaStdout extends BaseOutput {
+    
+        private Config config;
+    
+        @Override
+        public Config getConfig() {
+            return config;
+        }
+    
+        @Override
+        public void setConfig(Config config) {
+            this.config = config;
+        }
+    }
+    ````
+- Override the `checkConfig`, `prepare` and `process` methods defined by the parent class
+    ```Scala
+    override def checkConfig(): (Boolean, String) = {}
+    override def prepare(spark: SparkSession): Unit = {}
+    override def process(spark: SparkSession, ds: Dataset[Row]): Dataset[Row] = {}
+    ````
+    ````Java
+    @Override
+    public Tuple2<Object, String> checkConfig() {}
+    @Override
+    public void prepare(SparkSession spark) {}
+    @Override
+    public Dataset<Row> process(SparkSession spark, Dataset<Row> ds) {}
+    ````
+- The calling flow of an **Output** plugin is similar to that of a **Filter** plugin. When called, it first executes the `checkConfig` method to check whether the parameters passed to the plugin are correct, then calls the `prepare` method to set the default parameter values and initialize the member variables of the class, and finally calls the `process` method to write the **Dataset[Row]** data to an external data source.
+- For the Java implementation of the **Output** plugin, refer to [JavaStdout](https://github.com/InterestingLab/seatunnel-filter-example/blob/master/src/main/java/org/interestinglab/seatunnel/output/JavaStdout.java); for the Scala implementation, refer to [ScalaStdout](https://github.com/InterestingLab/seatunnel-filter-example/blob/master/src/main/scala/org/interestinglab/seatunnel/output/ScalaStdout.scala).
+
+### UDFs
+
+- Create a new class and inherit the parent class `BaseFilter` provided by **seatunnel-apis**
+    ```Scala
+    class ScalaSubstring extends BaseFilter {
+    
+      var config: Config = ConfigFactory.empty()
+    
+      /**
+        * SetConfig.
+        **/
+      override def setConfig(config: Config): Unit = {
+        this.config = config
+      }
+    
+      /**
+        * GetConfig.
+        **/
+      override def getConfig(): Config = {
+        this.config
+      }
+    }
+    ````
+- Override the `checkConfig`, `prepare`, `getUdfList` and `process` methods defined by the parent class. Only the `getUdfList` and `process` methods are introduced here:
+    ```Scala
+    override def getUdfList(): List[(String, UserDefinedFunction)] = {
+      val func = udf((s: String, pos: Int, len: Int) => s.substring(pos, pos+len))
+      List(("my_sub", func))
+    }
+    override def process(spark: SparkSession, ds: Dataset[Row]): Dataset[Row] = {
+      val srcField = config.getString("source_field")
+      val targetField = config.getString("target_field")
+      val pos = config.getInt("pos")
+      val len = config.getInt("len")
+      val func = getUdfList().get(0)._2
+      ds.withColumn(targetField, func(col(srcField), lit(pos), lit(len)))
+    }
+    ````
+  For a complete example of UDF plugin development, refer to [ScalaSubstring](https://github.com/InterestingLab/seatunnel-example/blob/rickyhuo.fea.udf/src/main/scala/org/interestinglab/seatunnel/filter/ScalaSubstring.scala#L15).
+- Add a new META-INF/services file
+
+  Seatunnel uses the **ServiceLoader** mechanism to register the UDFs returned by the `getUdfList` method of classes implementing `io.github.interestinglab.seatunnel.apis.BaseFilter`. If the implementation class is not declared in the services file, its UDFs will not be registered.
+
+  See the [META-INF/services file](https://github.com/InterestingLab/seatunnel-example/blob/master/src/main/resources/META-INF/services/io.github.interestinglab.seatunnel.apis.BaseFilter) in the example project.
+
+## 3. Package and use
+
+1. Packaging
+
+   > mvn package
+
+2. Put the packaged Jar package in the seatunnel `plugins` directory
+    ```shell
+    cd seatunnel-1.1.0
+    mkdir -p plugins/my_plugins/lib
+    cd plugins/my_plugins/lib
+    ````
+
+   Third-party Jar packages must be placed in a newly created **lib** folder:
+   > plugins/your_plugin_name/lib/your_jar_name
+
+   Other files go in:
+   > plugins/your_plugin_name/files/your_file_name
+
+3. Use plugins in configuration files
+
+   Here is a complete example using a third-party plugin and putting it in `config/application.conf`
+
+   The test data is generated by the `Fake` plugin, split with `Split`, then the third-party plugin `ScalaSubstring` is used to intercept the string, and finally the third-party plugin `JavaStdout` is used to print to the terminal.
+    ````
+    spark {
+        spark.streaming.batchDuration = 5
+        spark.app.name = "seatunnel-sample"
+        spark.ui.port = 13000
+        spark.executor.instances = 2
+        spark.executor.cores = 1
+        spark.executor.memory = "1g"
+    }
+
+    input {
+        fakeStream {
+            content = ["INFO : gary is 28 years old", "WARN : suwey is 16 years old"]
+            rate = 5
+        }
+    }
+
+    filter {
+        split {
+            fields = ["log_level", "message"]
+            delimiter = ":"
+        }
+        sql = {
+            table_name = "tmp"
+            # use UDF
+            sql = "select log_level, my_sub(message, 1, 3) from tmp"
+        }
+    }
+
+    output {
+        org.interestinglab.seatunnel.output.JavaStdout {
+            limit = 2
+        }
+    }
+    ````
+
+4. Start seatunnel
+
+    ````
+    ./bin/start-seatunnel.sh --config config/application.conf --deploy-mode client --master local[2]
+    ````
+
+5. View the results
+
+    ````
+    +---------+------------------+
+    |log_level|UDF(message, 1, 3)|
+    +---------+------------------+
+    |INFO     |ary               |
+    |INFO     |ary               |
+    +---------+------------------+
+    only showing top 2 rows
+
+    ````
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/installation.md b/versioned_docs/version-1.x/installation.md
new file mode 100644
index 00000000..9bb11425
--- /dev/null
+++ b/versioned_docs/version-1.x/installation.md
@@ -0,0 +1,33 @@
+# Download and install
+
+## Download
+
+### Community Edition (Community)
+
+https://github.com/InterestingLab/seatunnel/releases
+
+## Environment preparation
+
+### Prepare JDK 1.8
+
+Seatunnel requires the JDK 1.8 runtime environment.
+
+### Prepare Spark
+
+Seatunnel depends on Spark. Before installing seatunnel, you need to prepare Spark.
+Please [download Spark](http://spark.apache.org/downloads.html) first, and select Spark version >= 2.x.x. After downloading and decompressing, you can submit tasks in Spark deploy-mode = local mode without any configuration.
+If you expect the task to run on a Standalone cluster or a Yarn or Mesos cluster, please refer to the Spark official website configuration documentation.
+
+### Install seatunnel
+
+Download the seatunnel installation package and unzip it. Here we take the Community Edition as an example:
+
+````
+wget https://github.com/InterestingLab/seatunnel/releases/download/v<version>/seatunnel-<version>.zip -O seatunnel-<version>.zip
+unzip seatunnel-<version>.zip
+ln -s seatunnel-<version> seatunnel
+````
+
+There are no complicated installation and configuration steps. Please refer to [Quick Start](/zh-cn/v1/quick-start.md) for the usage of seatunnel, and to [Configuration](/zh-cn/v1/configuration/base) for configuration details.
+
+If you want to deploy seatunnel to run on Spark Standalone/Yarn/Mesos cluster, please refer to [seatunnel deployment](/zh-cn/v1/deployment)
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/internal.md b/versioned_docs/version-1.x/internal.md
new file mode 100644
index 00000000..727ca93c
--- /dev/null
+++ b/versioned_docs/version-1.x/internal.md
@@ -0,0 +1,33 @@
+# Going deeper into seatunnel
+
+## Pain points seatunnel strives to solve
+
+In addition to greatly simplifying distributed data processing, seatunnel does its best to solve the problems you may encounter:
+
+* Data loss and duplication
+
+For example, seatunnel's Kafka Input is implemented with the Kafka Direct API, and exactly-once processing is achieved through the checkpoint mechanism or through Outputs that support idempotent writes. In addition, the seatunnel code base has been thoroughly tested to minimize accidental data loss caused by abnormal data processing.
+
+* Task backlog and delays
+
+In production environments with a large number of Spark tasks, or with stages containing many tasks, we have often seen a single slow task drag down an entire batch. Seatunnel enables Spark speculative execution by default: it detects slow tasks, launches duplicate attempts, and takes the result of whichever attempt finishes first.
+
+* Low throughput
+
+The seatunnel code base directly uses a number of advanced Spark features that have been proven in practice to improve processing performance, for example:
+
+(1) The core processing code uses the Dataset and Spark SQL programming APIs, effectively taking advantage of Spark's Catalyst optimizer.
+
+(2) Plugin implementations can use broadcast variables, which optimizes scenarios such as IP library lookups and database connection maintenance.
+
+(3) In the plugin implementation code, performance is always our priority.
+
+* Long cycle from development to production deployment
+
+Seatunnel works out of the box and simplifies installation, deployment and startup in many ways; the plugin system is easy to configure and deploy, and developers can quickly integrate their business logic into seatunnel.
+
+* Lack of application health monitoring
+
+(1) Seatunnel has its own monitoring tool, `Guardian`, which is a sub-project of seatunnel. It can monitor whether seatunnel is alive and automatically restart seatunnel instances according to the configuration; it can also monitor whether streaming batches are backlogged or delayed at runtime and send alarms.
+
+(2) Timing statistics for each stage of data processing will be added in the next release to facilitate performance optimization.
\ No newline at end of file
diff --git a/versioned_docs/version-1.x/introduction.md b/versioned_docs/version-1.x/introduction.md
new file mode 100644
index 00000000..50e28f42
--- /dev/null
+++ b/versioned_docs/version-1.x/introduction.md
@@ -0,0 +1,178 @@
+# Introduction
+
+---
+
+## Core Concepts
+
+### Event
+
+#### Field Name
+
+A valid field name should not contain `.`, `@` or any other characters that are not allowed by ANSI standard SQL 2003 syntax.
+
+Reserved field names include:
+
+*   `__root__` means top level of the event.
+*   `__metadata__` means metadata field for internal use.
+
+#### Metadata
+
+Metadata can be set like ordinary fields; all fields in metadata are invisible to the output and are for internal use only.
+
+#### Field Reference
+
+* Single level: `a`
+* Multiple levels: `a.b.c`
+* Top level (Root) reference: `__root__`
+
+> [TODO] Notes: this design should be compatible with Spark SQL.
+
+---
+
+## Input
+
+### Kafka
+
+---
+
+## Filters
+
+### JSON
+
+### Split
+
+#### Synopsis
+
+| Setting | Input type | Required | Default value |
+| --- | --- | --- | --- |
+| delimiter | string | no | " " |
+| keys | array | yes | [] |
+| source_field | string | yes | "" |
+| tag_on_failure | string | no | "_tag" |
+| target_field | string | no | "\_\_root\_\_" |
+
+
+#### Details
+
+*	delimiter
+
+Regular expressions are supported.
+
+*	keys
+
+If the number of parts split by `delimiter` is larger than the number of keys in `keys`, the extra parts on the right side are ignored.
+
+*	source_field
+
+If `source_field` does not exist, nothing is done.
+
+*	target_field
+
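+A configuration sketch based on the synopsis above (the field name and keys are illustrative only):
+
+```
+split {
+    source_field = "message"
+    delimiter = " "
+    keys = ["log_level", "method", "status"]
+}
+```
+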
+---
+
+## SQL
+
+SQL can be used to filter and aggregate events; the underlying technique is Spark SQL.
+
+For example, the following SQL filters events whose response_time is between 300 and 1200 milliseconds.
+
+```
+select * from mytable where response_time >= 300 and response_time <= 1200
+```
+
+And this SQL counts sales for each city:
+
+```
+select city, count(sales) from mytable group by city
+```
+
+You can also combine these two SQL statements into one for both filtering and aggregation:
+
+```
+select city, count(*) from mytable where response_time >= 300 and response_time <= 1200 group by city
+```
+
+Pipelining multiple SQL queries:
+
+```
+sql {
+    query {
+        table_name = "mytable1"
+        sql = "select * from mytable1 where "
+    }
+    
+    query {
+        table_name = ""
+    }
+}
+```
+
+### Query
+
+#### Synopsis
+
+| Setting | Input type | Required | Default value |
+| --- | --- | --- | --- |
+| table_name | string | no | "mytable" |
+| sql | string | yes | - |
+
+> TODO : maybe we can add a `schema` settings for explicitly defining table schema. By now, schema is auto generated.
+
+#### Details
+
+* table_name
+
+Registers a temporary table using the given name, the default value is "mytable". You can use it in `sql`, such as:
+
+```
+select * from mytable where http_status >= 500
+```
+
+* sql
+
+Executes a SQL query using the given sql string.
+
+---
+
+## Output
+
+### Kafka
+
+## Serializer
+
+### Raw
+
+The default serializer is `raw`. If no serializer is configured in input/output, `raw` is used.
+
+#### Synopsis
+
+| Setting | Input type | Required | Default value |
+| --- | --- | --- | --- |
+| charset | string | no | "utf-8" |
+
+#### Details
+
+*   charset
+
+Serialize or deserialize using the given charset.
+
+Available charsets are:
+
+> [TODO] list all supported charsets, refer to logstash and these links:
+
+https://docs.oracle.com/javase/7/docs/api/java/nio/charset/Charset.html
+http://docs.oracle.com/javase/7/docs/technotes/guides/intl/encoding.doc.html
+http://www.iana.org/assignments/character-sets/character-sets.xhtml
+
+
+
+### JSON
+
+### Tar.gz
+
+> compressed codec
+
+## Contact Us
+* Mail list: **dev@seatunnel.apache.org**. Mail to `dev-subscribe@seatunnel.apache.org`, then follow the reply to subscribe to the mailing list.
+* Slack: Send `Request to join SeaTunnel slack` mail to the mail list(`dev@seatunnel.apache.org`), we will invite you in.
+
diff --git a/versioned_docs/version-1.x/monitoring.md b/versioned_docs/version-1.x/monitoring.md
new file mode 100644
index 00000000..c1ab730b
--- /dev/null
+++ b/versioned_docs/version-1.x/monitoring.md
@@ -0,0 +1,291 @@
+# Guardian
+
+[Guardian](https://github.com/InterestingLab/guardian) is a sub-project of seatunnel. It is a monitoring and alerting tool that can monitor whether seatunnel is alive and whether scheduling is delayed. Guardian can dynamically load configuration files at runtime and provides an HTTP API to support real-time modification of the configuration. Currently only seatunnel on Yarn is supported.
+
+## Run Guardian
+
+Download Guardian; here we take guardian_1.0.0 as an example:
+````
+wget https://github.com/InterestingLab/guardian/releases/download/v1.0.0/guardian_1.0.0.tar.gz
+tar -xvf guardian_1.0.0.tar.gz
+cd guardian_1.0.0
+./bin/guardian check config.json
+````
+
+
+## Configuration file
+
+Guardian configuration files are written in `JSON` format; for a valid example, click [here](https://github.com/InterestingLab/guardian/blob/master/config.json.template).
+
+The entire configuration file consists of the following parts:
+
+- port: the port to which the HTTP API is bound
+- node_name: node information
+- check_interval: the time interval for checking applications
+- yarn: the address of the YARN cluster to check
+- apps: the specific applications to be checked
+- alert_manager: alert management
+
+The following is a detailed description of each part:
+
+
+### yarn
+
+````
+# Yarn resourcemanager
+api_hosts: <list>
+````
+
+**Example**
+
+````
+"yarn": {
+    "api_hosts": [
+        "10.11.10.21:8088",
+        "10.11.10.22:8088"
+    ]
+}
+````
+
+### apps
+
+````
+[{
+    # Spark application name
+    "app_name": <string>,
+    # Restart command when application fails
+    "start_cmd": <string>,
+    # The number of applications running under the same app_name
+    "app_num": <number>,
+    # Application type, default 'spark'
+    "check_type": <string>,
+    # mark whether the application is valid or not
+    "active": <boolean>
+    "check_options": {
+        # Alarm level, support WARNNING, ERROR, etc.
+        "alert_level": <string>,
+        "max_delayed_batch_num": <number>,
+        "max_delayed_time": <number>
+    }
+}]
+````
+
+**Example**
+
+````
+"apps": [
+    {
+        "app_name": "seatunnel-app",
+        "start_cmd": "test_cmd",
+        "app_num": 1,
+        "check_type": "spark",
+        "check_options": {
+            "alert_level": "WARNING",
+            "max_delayed_batch_num": 10,
+            "max_delayed_time": 600
+        }
+    }
+]
+````
+
+### alert_manager
+
+#### routes
+
+Alarm routing, currently only supports alarm levels
+
+Trigger an alarm when the alarm level is `WARNING` or `ERROR`
+
+````
+"routes": {
+    "match": {
+        "level": ["WARNING", "ERROR"]
+    }
+}
+````
+
+#### **emails**
+
+Send alarm information by email
+
+````
+# Email authentication username
+"auth_username": <string>,
+# Email authentication password
+"auth_password": <string>,
+# Mailbox SMTP server
+"smtp_server": <string>,
+# sender
+"sender": <string>,
+# recipient list
+"receivers": <list>
+````
+
+**Example**
+
+````
+"emails": {
+    "auth_username": "username",
+    "auth_password": "password",
+    "smtp_server": "smtp.163.com",
+    "sender": "huochen1994@163.com",
+    "receivers": ["garygaowork@gmail.com"],
+    "routes": {
+        "match": {
+            "level": ["WARNING", "ERROR"]
+        }
+    }
+}
+````
+
+#### **webhooks**
+
+Implement a custom alarm method through a webhook interface
+
+````
+# webhook interface address
+"url": <string>
+````
+
+**Example**
+
+````
+"webhook": {
+    "url": "http://api.webhook.interestinglab.org/alert",
+    "routes": {
+        "match": {
+            "level": ["ERROR"]
+        }
+    }
+}
+````
+
+When Guardian calls the webhook, it sends an HTTP POST request to the configured interface address with the following JSON body:
+
+````
+{
+    "subject": "Guardian",
+    "objects": "seatunnel_app",
+    "content": "App is not running or less than expected number of running instance, will restart"
+}
+````
+
+
+## Guardian interface usage guide
+
+
+### GET
+
+#### Overview
+
+* Function description
+
+  Get the configuration information of Guardian corresponding to app_name
+
+* Basic interface
+
+  http://localhost:5000/config/[app_name]
+
+* Request method
+
+  get
+
+#### Interface parameter definition
+
+N/A
+
+#### Return result
+
+````
+curl 'http://localhost:5000/config/seatunnel-app2'
+
+{
+  "content": {
+    "app_name": "seatunnel-app2",
+    "app_num": 1,
+    "check_options": {},
+    "check_type": "spark",
+    "start_cmd": "test_cmd_not_exist"
+  },
+  "status": 0
+}
+````
+
+### POST
+
+#### Overview
+
+* Function description
+
+  Update or add application configuration information in Guardian. When `app_name` exists, the corresponding configuration is updated; when `app_name` does not exist, a new application monitoring configuration is added.
+
+* Basic interface
+
+  http://localhost:5000/config/[app_name]
+
+* Request method
+
+  post
+
+#### Interface parameter definition
+
+| Field | Type | Comment | Example |
+| :--: | :--: | :--: | :--: |
+| start_cmd | string | Restart command | |
+| app_num | num | Number of running instances | 2 |
+| check_type | string | Application type | spark |
+| check_options | dict | | |
+| active | boolean | Whether the application is active | true |
+
+#### Return result
+
+````
+curl 'http://localhost:5000/config/seatunnel-app2' -d '
+{
+    "active": false
+}'
+
+{
+  "status": 0
+}
+````
+
+### DELETE
+
+#### Overview
+
+* Function description
+
+  Delete the configuration information of Guardian corresponding to app_name
+
+* Basic interface
+
+  http://localhost:5000/config/[app_name]
+
+* Request method
+
+  delete
+
+#### Interface parameter definition
+
+N/A
+
+#### Return result
+
+````
+curl -XDELETE 'http://localhost:5000/config/seatunnel-app2'
+
+{
+  "status": 0
+}
+````
+
+
+### Return status code description
+
+| status | Description |
+| :--: | :--:|
+| 0 | Success|
+| 1 | Parameter error|
+| 2 | Internal error |
diff --git a/versioned_docs/version-1.x/quick-start.md b/versioned_docs/version-1.x/quick-start.md
new file mode 100644
index 00000000..8080029e
--- /dev/null
+++ b/versioned_docs/version-1.x/quick-start.md
@@ -0,0 +1,130 @@
+# Quick start
+
+> To quickly demonstrate how to use seatunnel, we take as an example an application that receives data through a socket, splits the data into multiple fields, and outputs the processing results.
+
+### Step 1: Prepare the Spark runtime environment
+
+> If you are familiar with Spark or have already prepared the Spark runtime environment, you can skip this step; Spark does not require any special configuration.
+
+Please [download Spark](http://spark.apache.org/downloads.html) first, choosing a Spark version >= 2.x.x. After downloading and decompressing it, you can submit tasks in Spark `deploy-mode = local` mode without any configuration.
+If you expect the task to run on a Standalone, Yarn, or Mesos cluster, please refer to the [Spark deployment documentation](http://spark.apache.org/docs/latest/cluster-overview.html) on the Spark official website.
+
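+For example, assuming a Spark 2.4.8 build with Hadoop 2.7 (any Spark 2.x package works), downloading and unpacking could look like this:
+
+````
+wget https://archive.apache.org/dist/spark/spark-2.4.8/spark-2.4.8-bin-hadoop2.7.tgz
+tar -zxvf spark-2.4.8-bin-hadoop2.7.tgz
+````
+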
+### Step 2: Download seatunnel
+
+Go to the [seatunnel installation package download page](https://github.com/InterestingLab/seatunnel/releases/latest) and download the latest version of `seatunnel-<version>.zip`
+
+Or directly download the specified version (take 1.1.2 as an example):
+
+````
+wget https://github.com/InterestingLab/seatunnel/releases/download/v1.1.2/seatunnel-1.1.2.zip -O seatunnel-1.1.2.zip
+````
+
+After downloading, unzip:
+
+````
+unzip seatunnel-<version>.zip
+ln -s seatunnel-<version> seatunnel
+````
+
+### Step 3: Configure seatunnel
+
+Edit `config/seatunnel-env.sh` and specify the required environment configuration, such as `SPARK_HOME` (the directory where Spark was downloaded and decompressed in Step 1).
+
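+A minimal `config/seatunnel-env.sh` only needs to point at that Spark directory; assuming Spark was unpacked to `/opt/spark`, it could look like this:
+
+````
+# Home directory of the Spark distribution prepared in Step 1
+SPARK_HOME=${SPARK_HOME:-/opt/spark}
+````
+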
+Edit `config/application.conf`, which determines how data is input, processed, and output after seatunnel starts.
+
+````
+spark {
+  # seatunnel defined streaming batch duration in seconds
+  spark.streaming.batchDuration = 5
+
+  spark.app.name = "seatunnel"
+  spark.ui.port = 13000
+}
+
+input {
+  socketStream {}
+}
+
+filter {
+  split {
+    fields = ["msg", "name"]
+    delimiter = ","
+  }
+}
+
+output {
+  stdout {}
+}
+
+````
+
+### Step 4: Start the netcat server for sending data
+
+````
+nc -l -p 9999
+````
+
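+Note: the `-p` flag is accepted by some netcat builds but rejected by others (for example, the BSD/macOS variant); if the command above fails, a form like the one below usually works instead:
+
+````
+nc -lk 9999
+````
+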
+
+### Step 5: Start seatunnel
+
+````
+cd seatunnel
+./bin/start-seatunnel.sh --master local[4] --deploy-mode client --config ./config/application.conf
+
+````
+
+### Step 6: Input at the nc terminal
+
+````
+Hello World, Gary
+````
+The seatunnel log prints out:
+
+````
++-----------------+-----------+----+
+|raw_message      |msg        |name|
++-----------------+-----------+----+
+|Hello World, Gary|Hello World|Gary|
++-----------------+-----------+----+
+````
+
+
+### Summary
+
+seatunnel is simple and easy to use, and richer data processing capabilities are waiting to be discovered. The data processing case presented here
+requires no code, compilation, or packaging, and is simpler than the official [Quick Example](https://spark.apache.org/docs/latest/streaming-programming-guide.html#a-quick-example).
+
+
+---
+
+For more seatunnel configuration examples, see:
+
+[Configuration example 1: Streaming computing](https://github.com/InterestingLab/seatunnel/blob/master/config/streaming.conf.template)
+
+The above configuration is the default [Streaming Configuration Template], which can be run directly. The command is as follows:
+
+````
+cd seatunnel
+./bin/start-seatunnel.sh --master local[4] --deploy-mode client --config ./config/streaming.conf.template
+
+````
+
+[Configuration example 2: Offline batch processing](https://github.com/InterestingLab/seatunnel/blob/master/config/batch.conf.template)
+
+The above configuration is the default [offline batch configuration template], which can be run directly. The command is as follows:
+
+````
+cd seatunnel
+./bin/start-seatunnel.sh --master local[4] --deploy-mode client --config ./config/batch.conf.template
+
+````
+
+[Configuration example 3: Structured Streaming](https://github.com/InterestingLab/seatunnel/blob/master/config/structuredstreaming.conf.template)
+
+The above configuration is the default [Structured Streaming configuration template]. The Kafka input source needs to be configured before it can run. The command is as follows:
+
+````
+cd seatunnel
+./bin/start-seatunnel-structured-streaming.sh --master local[4] --deploy-mode client --config ./config/structuredstreaming.conf.template
+
+````
\ No newline at end of file
diff --git a/versioned_sidebars/version-1.x-sidebars.json b/versioned_sidebars/version-1.x-sidebars.json
new file mode 100644
index 00000000..694d32d8
--- /dev/null
+++ b/versioned_sidebars/version-1.x-sidebars.json
@@ -0,0 +1,73 @@
+{
+  "docs": [
+    "introduction",
+    "quick-start",
+    {
+      "type": "category",
+      "label": "Configuration",
+      "items": [
+        "configuration/base",
+        {
+          "type": "category",
+          "label": "Filter-Plugin",
+          "link": {
+            "type": "generated-index",
+            "title": "Filter of SeaTunnel",
+            "description": "List all filter supported Apache SeaTunnel for now.",
+            "slug": "/category/filter",
+            "keywords": ["filter"],
+            "image": "/img/favicon.ico"
+          },
+          "items": [
+            "configuration/filter-plugin",
+            {
+              "type": "autogenerated",
+              "dirName": "configuration/filter-plugins"
+            }
+          ]
+        },
+        {
+          "type": "category",
+          "label": "Input-Plugin",
+          "link": {
+            "type": "generated-index",
+            "title": "Input of SeaTunnel",
+            "description": "List all input supported Apache SeaTunnel for now.",
+            "slug": "/category/input",
+            "keywords": ["input"],
+            "image": "/img/favicon.ico"
+          },
+          "items": [
+            "configuration/input-plugin",
+            {
+              "type": "autogenerated",
+              "dirName": "configuration/input-plugins"
+            }
+          ]
+        },
+        {
+          "type": "category",
+          "label": "Output-Plugin",
+          "link": {
+            "type": "generated-index",
+            "title": "Output of SeaTunnel",
+            "description": "List all output supported Apache SeaTunnel for now.",
+            "slug": "/category/output",
+            "keywords": ["output"],
+            "image": "/img/favicon.ico"
+          },
+          "items": [
+            "configuration/output-plugin",
+            {
+              "type": "autogenerated",
+              "dirName": "configuration/output-plugins"
+            }
+          ]
+        }
+      ]
+    },
+    "deployment",
+    "developing-plugin",
+    "monitoring"
+  ]
+}
diff --git a/versions.json b/versions.json
index b94eeffa..d1be432c 100644
--- a/versions.json
+++ b/versions.json
@@ -1,3 +1,4 @@
 [
-  "2.1.0"
+  "2.1.0",
+  "1.x"
 ]