You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@dolphinscheduler.apache.org by ke...@apache.org on 2022/12/22 06:00:36 UTC

[dolphinscheduler] 01/01: Add Terraform deploy manifests for quick setup in AWS

This is an automated email from the ASF dual-hosted git repository.

kezhenxu94 pushed a commit to branch terraform
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git

commit a4079e46ce6d261cf8af2df6ab26ccd05f835232
Author: kezhenxu94 <ke...@apache.org>
AuthorDate: Thu Dec 22 14:00:22 2022 +0800

    Add Terraform deploy manifests for quick setup in AWS
---
 deploy/terraform/.gitignore                        |  39 ++++++
 deploy/terraform/README.md                         |  11 ++
 deploy/terraform/aws/.terraform.lock.hcl           |  79 ++++++++++++
 deploy/terraform/aws/README.md                     | 143 +++++++++++++++++++++
 deploy/terraform/aws/dolphinscheduler-alert.tf     |  87 +++++++++++++
 deploy/terraform/aws/dolphinscheduler-api.tf       |  87 +++++++++++++
 deploy/terraform/aws/dolphinscheduler-master.tf    |  96 ++++++++++++++
 deploy/terraform/aws/dolphinscheduler-output.tf    |  84 ++++++++++++
 .../terraform/aws/dolphinscheduler-standalone.tf   |  87 +++++++++++++
 deploy/terraform/aws/dolphinscheduler-variables.tf |  97 ++++++++++++++
 deploy/terraform/aws/dolphinscheduler-worker.tf    |  91 +++++++++++++
 deploy/terraform/aws/key-pair-main.tf              |  18 +++
 deploy/terraform/aws/network-main.tf               |  68 ++++++++++
 deploy/terraform/aws/network-variables.tf          |  37 ++++++
 deploy/terraform/aws/os-versions.tf                |  29 +++++
 deploy/terraform/aws/packer/ds-ami-local.pkr.hcl   |  76 +++++++++++
 .../terraform/aws/packer/ds-ami-official.pkr.hcl   |  73 +++++++++++
 deploy/terraform/aws/provider-main.tf              |   9 ++
 deploy/terraform/aws/provider-variables.tf         |  29 +++++
 deploy/terraform/aws/rds-main.tf                   |  37 ++++++
 deploy/terraform/aws/rds-output.tf                 |  14 ++
 deploy/terraform/aws/rds-variables.tf              |  13 ++
 deploy/terraform/aws/s3-main.tf                    |  53 ++++++++
 deploy/terraform/aws/s3-outputs.tf                 |  25 ++++
 deploy/terraform/aws/s3-variables.tf               |   4 +
 deploy/terraform/aws/templates/cloud-init.yaml     |  72 +++++++++++
 .../aws/templates/zookeeper/cloud-init.yaml        |  29 +++++
 deploy/terraform/aws/zookeeper-main.tf             |  96 ++++++++++++++
 deploy/terraform/aws/zookeeper-output.tf           |  16 +++
 deploy/terraform/aws/zookeeper-variables.tf        |   5 +
 30 files changed, 1604 insertions(+)

diff --git a/deploy/terraform/.gitignore b/deploy/terraform/.gitignore
new file mode 100644
index 0000000000..ba83a8e481
--- /dev/null
+++ b/deploy/terraform/.gitignore
@@ -0,0 +1,39 @@
+# Standard TF .gitignore
+# Local .terraform directories
+**/.terraform/*
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+
+# Crash log files
+crash.log
+crash.*.log
+
+# Exclude all .tfvars files, which are likely to contain sensitive data, such as
+# password, private keys, and other secrets. These should not be part of version
+# control as they are data points which are potentially sensitive and subject
+# to change depending on the environment.
+*.tfvars
+*.tfvars.json
+
+# Ignore override files as they are usually used to override resources locally and so
+# are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Include override files you do wish to add to version control using negated pattern
+# !example_override.tf
+
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+# example: *tfplan*
+
+# Ignore CLI configuration files
+.terraformrc
+terraform.rc
+
+*.pem
+
+*.pkrvars.hcl
diff --git a/deploy/terraform/README.md b/deploy/terraform/README.md
new file mode 100644
index 0000000000..bc4adab641
--- /dev/null
+++ b/deploy/terraform/README.md
@@ -0,0 +1,11 @@
+# Terraform for DolphinScheduler
+
+Terraform scripts to set up a DolphinScheduler environment (standalone mode and/or cluster mode) in minutes (if not seconds).
+
+## Install Terraform
+
+Install Terraform according to [the documentation](https://developer.hashicorp.com/terraform/downloads?product_intent=terraform).
+
+## AWS
+
+Refer to [the doc](aws/README.md).
diff --git a/deploy/terraform/aws/.terraform.lock.hcl b/deploy/terraform/aws/.terraform.lock.hcl
new file mode 100644
index 0000000000..b80b60b054
--- /dev/null
+++ b/deploy/terraform/aws/.terraform.lock.hcl
@@ -0,0 +1,79 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/aws" {
+  version = "4.42.0"
+  hashes = [
+    "h1:cS7q80JomJrUZpm+bnK5H/iRjF5+7HAA3qgw+JznPiM=",
+    "zh:091b64bccee701462b19ca99fe3bff0716e9445a88d0e4d0d0f322062b02bb60",
+    "zh:1fd9b0bf3421ad65284d693e60de068fc9b247d4fa7df6c1d62ad4796088f795",
+    "zh:3e34e4fcfaa30b04811aaa92c4d6115ddaa820ac11fa82ad217f42ae17a068ea",
+    "zh:47b412ab9cc3730797659ffb775a429b5398a5f403c9cca2ec5f663e21a69077",
+    "zh:8e29e90fdf29d76bb8fab62c184c4ec78e37030277dbe2c0dd97557fdfcbcb50",
+    "zh:8ef4e94b5672234a68649bdbb93416c1829c5cd6f37be584f8e8610f14ca95b0",
+    "zh:92a3eb5ae0c2c83717973c56b0427bf1fe8fba3ba72ced01e5eefc5c0cff8bf3",
+    "zh:96b9f714aed24206f8f47af39426aa8c02f172ac6d5516bcc375583d120bf4f8",
+    "zh:996ec2065cf0c52b125e3ac8bbd059e5733d8393143cebbb427a236d08c742b4",
+    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+    "zh:b3abba32ccbea87b6a46e846c878ca0b2e9736dbaedd512a712398ece91de431",
+    "zh:b77292051d499f66ed80434ac435930204b92ca906c0d38fdb8a0ac37efa25ae",
+    "zh:f4ed19b15bd7cd99ee248023a94a10d586e69f81d0d8326d87a79cc37b579a4f",
+    "zh:f906a8003e6ad0dd561d8c62e02a65835d8c5009dd7cbfe03e28becced82e5db",
+    "zh:faff211d1559cbae669b63cdd6436a2ef0fb24108d8fa73b625dc5334b50aada",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/local" {
+  version = "2.2.3"
+  hashes = [
+    "h1:KmHz81iYgw9Xn2L3Carc2uAzvFZ1XsE7Js3qlVeC77k=",
+    "zh:04f0978bb3e052707b8e82e46780c371ac1c66b689b4a23bbc2f58865ab7d5c0",
+    "zh:6484f1b3e9e3771eb7cc8e8bab8b35f939a55d550b3f4fb2ab141a24269ee6aa",
+    "zh:78a56d59a013cb0f7eb1c92815d6eb5cf07f8b5f0ae20b96d049e73db915b238",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:8aa9950f4c4db37239bcb62e19910c49e47043f6c8587e5b0396619923657797",
+    "zh:996beea85f9084a725ff0e6473a4594deb5266727c5f56e9c1c7c62ded6addbb",
+    "zh:9a7ef7a21f48fabfd145b2e2a4240ca57517ad155017e86a30860d7c0c109de3",
+    "zh:a63e70ac052aa25120113bcddd50c1f3cfe61f681a93a50cea5595a4b2cc3e1c",
+    "zh:a6e8d46f94108e049ad85dbed60354236dc0b9b5ec8eabe01c4580280a43d3b8",
+    "zh:bb112ce7efbfcfa0e65ed97fa245ef348e0fd5bfa5a7e4ab2091a9bd469f0a9e",
+    "zh:d7bec0da5c094c6955efed100f3fe22fca8866859f87c025be1760feb174d6d9",
+    "zh:fb9f271b72094d07cef8154cd3d50e9aa818a0ea39130bc193132ad7b23076fd",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/template" {
+  version = "2.2.0"
+  hashes = [
+    "h1:0wlehNaxBX7GJQnPfQwTNvvAf38Jm0Nv7ssKGMaG6Og=",
+    "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
+    "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
+    "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
+    "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
+    "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
+    "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
+    "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
+    "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
+    "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
+    "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/tls" {
+  version = "4.0.4"
+  hashes = [
+    "h1:Wd3RqmQW60k2QWPN4sK5CtjGuO1d+CRNXgC+D4rKtXc=",
+    "zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
+    "zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
+    "zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
+    "zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
+    "zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
+    "zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
+    "zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
+    "zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
+    "zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
+    "zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
+    "zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}
diff --git a/deploy/terraform/aws/README.md b/deploy/terraform/aws/README.md
new file mode 100644
index 0000000000..c822a2322d
--- /dev/null
+++ b/deploy/terraform/aws/README.md
@@ -0,0 +1,143 @@
+# Prerequisites
+
+- [Packer](https://developer.hashicorp.com/packer/downloads)
+- [Terraform](https://developer.hashicorp.com/terraform/downloads?ajs_aid=e8824c6e-5f6f-480c-bb7d-27f8c97f8d8d&product_intent=terraform)
+
+# Build AMI
+
+Set necessary variables by creating a file `ds-ami.pkrvars.hcl` and adding the following variables according to your own usage.
+
+```hcl
+cat <<EOF > ds-ami.pkrvars.hcl
+aws_access_key = ""
+aws_secret_key = ""
+aws_region     = "cn-north-1"
+
+
+ds_ami_name = "my-test-ds"
+
+# If you want to use the official distribution tar, just set the `ds_version` to the one you want.
+ds_version  = "3.1.1"
+
+# If you want to use a locally built distribution tar, set the `ds_tar` to the tar file location.
+ds_tar      = "~/workspace/dolphinscheduler/dolphinscheduler-dist/target/apache-dolphinscheduler-3.1.3-SNAPSHOT-bin.tar.gz"
+EOF
+```
+
+Then run the following command to initialize and build a custom AMI.
+
+- If you want to use the official distribution tar.
+
+```shell
+packer init --var-file=ds-ami.pkrvars.hcl packer/ds-ami-official.pkr.hcl
+packer build --var-file=ds-ami.pkrvars.hcl packer/ds-ami-official.pkr.hcl
+```
+
+- If you want to use the locally built distribution tar.
+
+```shell
+packer init --var-file=ds-ami.pkrvars.hcl packer/ds-ami-local.pkr.hcl
+packer build --var-file=ds-ami.pkrvars.hcl packer/ds-ami-local.pkr.hcl
+```
+
+# Create resources
+
+Set necessary variables by creating a file `terraform.tfvars` and adding the following variables according to your own usage.
+
+Make sure `ds_ami_name` is the same as the one in `ds-ami.pkrvars.hcl` above.
+
+```tfvars
+cat <<EOF > terraform.tfvars
+aws_access_key = ""
+aws_secret_key = ""
+
+name_prefix = "test-ds-terraform"
+ds_ami_name = "my-test-ds"
+
+ds_component_replicas = {
+  master            = 1
+  worker            = 1
+  alert             = 1
+  api               = 1
+  standalone_server = 0
+}
+EOF
+```
+
+Then run the following commands to apply necessary resources.
+
+```shell
+terraform init
+terraform apply -var-file=terraform.tfvars -auto-approve
+```
+
+# Open DolphinScheduler UI
+
+```shell
+open http://$(terraform output -json api_server_instance_public_dns | jq -r '.[0]'):12345/dolphinscheduler/ui
+```
+
+# Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_aws_access_key"></a> [aws\_access\_key](#input\_aws\_access\_key) | AWS access key | `string` | n/a | yes |
+| <a name="input_aws_region"></a> [aws\_region](#input\_aws\_region) | AWS region | `string` | `"cn-north-1"` | no |
+| <a name="input_aws_secret_key"></a> [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key | `string` | n/a | yes |
+| <a name="input_db_instance_class"></a> [db\_instance\_class](#input\_db\_instance\_class) | Database instance class | `string` | `"db.t3.micro"` | no |
+| <a name="input_db_password"></a> [db\_password](#input\_db\_password) | Database password | `string` | n/a | yes |
+| <a name="input_db_username"></a> [db\_username](#input\_db\_username) | Database username | `string` | `"dolphinscheduler"` | no |
+| <a name="input_ds_ami_name"></a> [ds\_ami\_name](#input\_ds\_ami\_name) | Name of DolphinScheduler AMI | `string` | `"dolphinscheduler-ami"` | no |
+| <a name="input_ds_component_replicas"></a> [ds\_component\_replicas](#input\_ds\_component\_replicas) | Replicas of the DolphinScheduler Components | `map(number)` | <pre>{<br>  "alert": 1,<br>  "api": 1,<br>  "master": 1,<br>  "standalone_server": 0,<br>  "worker": 1<br>}</pre> | no |
+| <a name="input_ds_version"></a> [ds\_version](#input\_ds\_version) | DolphinScheduler Version | `string` | `"3.1.1"` | no |
+| <a name="input_name_prefix"></a> [name\_prefix](#input\_name\_prefix) | Name prefix for all resources | `string` | `"dolphinscheduler"` | no |
+| <a name="input_private_subnet_cidr_blocks"></a> [private\_subnet\_cidr\_blocks](#input\_private\_subnet\_cidr\_blocks) | Available CIDR blocks for private subnets | `list(string)` | <pre>[<br>  "10.0.101.0/24",<br>  "10.0.102.0/24",<br>  "10.0.103.0/24",<br>  "10.0.104.0/24"<br>]</pre> | no |
+| <a name="input_public_subnet_cidr_blocks"></a> [public\_subnet\_cidr\_blocks](#input\_public\_subnet\_cidr\_blocks) | CIDR blocks for the public subnets | `list(string)` | <pre>[<br>  "10.0.1.0/24",<br>  "10.0.2.0/24",<br>  "10.0.3.0/24",<br>  "10.0.4.0/24"<br>]</pre> | no |
+| <a name="input_s3_bucket_prefix"></a> [s3\_bucket\_prefix](#input\_s3\_bucket\_prefix) | n/a | `string` | `"dolphinscheduler-test-"` | no |
+| <a name="input_subnet_count"></a> [subnet\_count](#input\_subnet\_count) | Number of subnets | `map(number)` | <pre>{<br>  "private": 2,<br>  "public": 1<br>}</pre> | no |
+| <a name="input_tags"></a> [tags](#input\_tags) | Tags to apply to all resources | `map(string)` | <pre>{<br>  "Deployment": "Test"<br>}</pre> | no |
+| <a name="input_vm_associate_public_ip_address"></a> [vm\_associate\_public\_ip\_address](#input\_vm\_associate\_public\_ip\_address) | Associate a public IP address to the EC2 instance | `map(bool)` | <pre>{<br>  "alert": true,<br>  "api": true,<br>  "master": true,<br>  "standalone_server": true,<br>  "worker": true<br>}</pre> | no |
+| <a name="input_vm_data_volume_size"></a> [vm\_data\_volume\_size](#input\_vm\_data\_volume\_size) | Data volume size of the EC2 Instance | `map(number)` | <pre>{<br>  "alert": 10,<br>  "api": 10,<br>  "master": 10,<br>  "standalone_server": 10,<br>  "worker": 10<br>}</pre> | no |
+| <a name="input_vm_data_volume_type"></a> [vm\_data\_volume\_type](#input\_vm\_data\_volume\_type) | Data volume type of the EC2 Instance | `map(string)` | <pre>{<br>  "alert": "gp2",<br>  "api": "gp2",<br>  "master": "gp2",<br>  "standalone_server": "gp2",<br>  "worker": "gp2"<br>}</pre> | no |
+| <a name="input_vm_instance_type"></a> [vm\_instance\_type](#input\_vm\_instance\_type) | EC2 instance type | `map(string)` | <pre>{<br>  "alert": "t2.micro",<br>  "api": "t2.small",<br>  "master": "t2.medium",<br>  "standalone_server": "t2.small",<br>  "worker": "t2.medium"<br>}</pre> | no |
+| <a name="input_vm_root_volume_size"></a> [vm\_root\_volume\_size](#input\_vm\_root\_volume\_size) | Root Volume size of the EC2 Instance | `map(number)` | <pre>{<br>  "alert": 30,<br>  "api": 30,<br>  "master": 30,<br>  "standalone_server": 30,<br>  "worker": 30<br>}</pre> | no |
+| <a name="input_vm_root_volume_type"></a> [vm\_root\_volume\_type](#input\_vm\_root\_volume\_type) | Root volume type of the EC2 Instance | `map(string)` | <pre>{<br>  "alert": "gp2",<br>  "api": "gp2",<br>  "master": "gp2",<br>  "standalone_server": "gp2",<br>  "worker": "gp2"<br>}</pre> | no |
+| <a name="input_vpc_cidr"></a> [vpc\_cidr](#input\_vpc\_cidr) | CIDR for the VPC | `string` | `"10.0.0.0/16"` | no |
+| <a name="input_zookeeper_connect_string"></a> [zookeeper\_connect\_string](#input\_zookeeper\_connect\_string) | Zookeeper connect string, if empty, will create a single-node zookeeper for demonstration, don't use this in production | `string` | `""` | no |
+
+# Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_alert_server_instance_id"></a> [alert\_server\_instance\_id](#output\_alert\_server\_instance\_id) | Instance IDs of alert instances |
+| <a name="output_alert_server_instance_private_ip"></a> [alert\_server\_instance\_private\_ip](#output\_alert\_server\_instance\_private\_ip) | Private IPs of alert instances |
+| <a name="output_alert_server_instance_public_dns"></a> [alert\_server\_instance\_public\_dns](#output\_alert\_server\_instance\_public\_dns) | Public domain names of alert instances |
+| <a name="output_alert_server_instance_public_ip"></a> [alert\_server\_instance\_public\_ip](#output\_alert\_server\_instance\_public\_ip) | Public IPs of alert instances |
+| <a name="output_api_server_instance_id"></a> [api\_server\_instance\_id](#output\_api\_server\_instance\_id) | Instance IDs of api instances |
+| <a name="output_api_server_instance_private_ip"></a> [api\_server\_instance\_private\_ip](#output\_api\_server\_instance\_private\_ip) | Private IPs of api instances |
+| <a name="output_api_server_instance_public_dns"></a> [api\_server\_instance\_public\_dns](#output\_api\_server\_instance\_public\_dns) | Public domain names of api instances |
+| <a name="output_api_server_instance_public_ip"></a> [api\_server\_instance\_public\_ip](#output\_api\_server\_instance\_public\_ip) | Public IPs of api instances |
+| <a name="output_db_address"></a> [db\_address](#output\_db\_address) | Database address |
+| <a name="output_db_name"></a> [db\_name](#output\_db\_name) | Database name |
+| <a name="output_db_port"></a> [db\_port](#output\_db\_port) | Database port |
+| <a name="output_master_server_instance_id"></a> [master\_server\_instance\_id](#output\_master\_server\_instance\_id) | Instance IDs of master instances |
+| <a name="output_master_server_instance_private_ip"></a> [master\_server\_instance\_private\_ip](#output\_master\_server\_instance\_private\_ip) | Private IPs of master instances |
+| <a name="output_master_server_instance_public_dns"></a> [master\_server\_instance\_public\_dns](#output\_master\_server\_instance\_public\_dns) | Public domain names of master instances |
+| <a name="output_master_server_instance_public_ip"></a> [master\_server\_instance\_public\_ip](#output\_master\_server\_instance\_public\_ip) | Public IPs of master instances |
+| <a name="output_s3_access_key"></a> [s3\_access\_key](#output\_s3\_access\_key) | S3 access key |
+| <a name="output_s3_address"></a> [s3\_address](#output\_s3\_address) | S3 address |
+| <a name="output_s3_bucket"></a> [s3\_bucket](#output\_s3\_bucket) | S3 bucket name |
+| <a name="output_s3_regional_domain_name"></a> [s3\_regional\_domain\_name](#output\_s3\_regional\_domain\_name) | S3 regional domain name |
+| <a name="output_s3_secret"></a> [s3\_secret](#output\_s3\_secret) | S3 access secret |
+| <a name="output_vm_server_instance_id"></a> [vm\_server\_instance\_id](#output\_vm\_server\_instance\_id) | Instance IDs of standalone instances |
+| <a name="output_vm_server_instance_private_ip"></a> [vm\_server\_instance\_private\_ip](#output\_vm\_server\_instance\_private\_ip) | Private IPs of standalone instances |
+| <a name="output_vm_server_instance_public_dns"></a> [vm\_server\_instance\_public\_dns](#output\_vm\_server\_instance\_public\_dns) | Public domain names of standalone instances |
+| <a name="output_vm_server_instance_public_ip"></a> [vm\_server\_instance\_public\_ip](#output\_vm\_server\_instance\_public\_ip) | Public IPs of standalone instances |
+| <a name="output_worker_server_instance_id"></a> [worker\_server\_instance\_id](#output\_worker\_server\_instance\_id) | Instance IDs of worker instances |
+| <a name="output_worker_server_instance_private_ip"></a> [worker\_server\_instance\_private\_ip](#output\_worker\_server\_instance\_private\_ip) | Private IPs of worker instances |
+| <a name="output_worker_server_instance_public_dns"></a> [worker\_server\_instance\_public\_dns](#output\_worker\_server\_instance\_public\_dns) | Public domain names of worker instances |
+| <a name="output_worker_server_instance_public_ip"></a> [worker\_server\_instance\_public\_ip](#output\_worker\_server\_instance\_public\_ip) | Public IPs of worker instances |
+| <a name="output_zookeeper_server_instance_id"></a> [zookeeper\_server\_instance\_id](#output\_zookeeper\_server\_instance\_id) | Instance IDs of zookeeper instances |
+| <a name="output_zookeeper_server_instance_private_ip"></a> [zookeeper\_server\_instance\_private\_ip](#output\_zookeeper\_server\_instance\_private\_ip) | Private IPs of zookeeper instances |
+| <a name="output_zookeeper_server_instance_public_dns"></a> [zookeeper\_server\_instance\_public\_dns](#output\_zookeeper\_server\_instance\_public\_dns) | Public domain names of zookeeper instances |
+| <a name="output_zookeeper_server_instance_public_ip"></a> [zookeeper\_server\_instance\_public\_ip](#output\_zookeeper\_server\_instance\_public\_ip) | Public IPs of zookeeper instances |
diff --git a/deploy/terraform/aws/dolphinscheduler-alert.tf b/deploy/terraform/aws/dolphinscheduler-alert.tf
new file mode 100644
index 0000000000..6cf6a3fb11
--- /dev/null
+++ b/deploy/terraform/aws/dolphinscheduler-alert.tf
@@ -0,0 +1,87 @@
+resource "aws_security_group" "alert" {
+  name        = "alert_server_sg"
+  description = "Allow incoming connections"
+  vpc_id      = aws_vpc._.id
+  ingress {
+    from_port       = 50052
+    to_port         = 50053
+    protocol        = "tcp"
+    security_groups = [aws_security_group.worker.id]
+    description     = "Allow incoming HTTP connections"
+  }
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow incoming SSH connections (Linux)"
+  }
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-alert-sg"
+  })
+}
+
+data "template_file" "alert_user_data" {
+  template = file("templates/cloud-init.yaml")
+  vars = {
+    "ssh_public_key"             = aws_key_pair.key_pair.public_key
+    "dolphinscheduler_version"   = var.ds_version
+    "dolphinscheduler_component" = "alert-server"
+    "database_address"           = aws_db_instance.database.address
+    "database_port"              = aws_db_instance.database.port
+    "database_name"              = aws_db_instance.database.db_name
+    "database_username"          = aws_db_instance.database.username
+    "database_password"          = aws_db_instance.database.password
+    "zookeeper_connect_string"   = var.zookeeper_connect_string != "" ? var.zookeeper_connect_string : aws_instance.zookeeper[0].private_ip
+    "alert_server_host"          = ""
+    "s3_access_key_id"           = aws_iam_access_key.s3.id
+    "s3_secret_access_key"       = aws_iam_access_key.s3.secret
+    "s3_region"                  = var.aws_region
+    "s3_bucket_name"             = module.s3_bucket.s3_bucket_id
+    "s3_endpoint"                = ""
+  }
+}
+
+resource "aws_instance" "alert" {
+  count = var.ds_component_replicas.alert
+
+  ami                         = data.aws_ami.dolphinscheduler.id
+  instance_type               = var.vm_instance_type.alert
+  subnet_id                   = aws_subnet.public[0].id
+  vpc_security_group_ids      = [aws_security_group.alert.id]
+  source_dest_check           = false
+  associate_public_ip_address = var.vm_associate_public_ip_address.alert
+
+  user_data = data.template_file.alert_user_data.rendered
+
+  root_block_device {
+    volume_size           = var.vm_root_volume_size.alert
+    volume_type           = var.vm_root_volume_type.alert
+    delete_on_termination = true
+    encrypted             = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-rbd-alert-${count.index}"
+    })
+  }
+
+  ebs_block_device {
+    device_name           = "/dev/xvda"
+    volume_size           = var.vm_data_volume_size.alert
+    volume_type           = var.vm_data_volume_type.alert
+    encrypted             = true
+    delete_on_termination = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-ebd-alert-${count.index}"
+    })
+  }
+
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-alert-${count.index}"
+  })
+}
diff --git a/deploy/terraform/aws/dolphinscheduler-api.tf b/deploy/terraform/aws/dolphinscheduler-api.tf
new file mode 100644
index 0000000000..b32911cb1c
--- /dev/null
+++ b/deploy/terraform/aws/dolphinscheduler-api.tf
@@ -0,0 +1,87 @@
+resource "aws_security_group" "api" {
+  name        = "api"
+  description = "Allow incoming connections"
+  vpc_id      = aws_vpc._.id
+  ingress {
+    from_port   = 12345
+    to_port     = 12345
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow incoming HTTP connections"
+  }
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow incoming SSH connections (Linux)"
+  }
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-sg"
+  })
+}
+
+data "template_file" "api_user_data" {
+  template = file("templates/cloud-init.yaml")
+  vars = {
+    "ssh_public_key"             = aws_key_pair.key_pair.public_key
+    "dolphinscheduler_version"   = var.ds_version
+    "dolphinscheduler_component" = "api-server"
+    "database_address"           = aws_db_instance.database.address
+    "database_port"              = aws_db_instance.database.port
+    "database_name"              = aws_db_instance.database.db_name
+    "database_username"          = aws_db_instance.database.username
+    "database_password"          = aws_db_instance.database.password
+    "zookeeper_connect_string"   = var.zookeeper_connect_string != "" ? var.zookeeper_connect_string : aws_instance.zookeeper[0].private_ip
+    "alert_server_host"          = ""
+    "s3_access_key_id"           = aws_iam_access_key.s3.id
+    "s3_secret_access_key"       = aws_iam_access_key.s3.secret
+    "s3_region"                  = var.aws_region
+    "s3_bucket_name"             = module.s3_bucket.s3_bucket_id
+    "s3_endpoint"                = ""
+  }
+}
+
+resource "aws_instance" "api" {
+  count = var.ds_component_replicas.api
+
+  ami                         = data.aws_ami.dolphinscheduler.id
+  instance_type               = var.vm_instance_type.api
+  subnet_id                   = aws_subnet.public[0].id
+  vpc_security_group_ids      = [aws_security_group.api.id]
+  source_dest_check           = false
+  associate_public_ip_address = var.vm_associate_public_ip_address.api
+
+  user_data = data.template_file.api_user_data.rendered
+
+  root_block_device {
+    volume_size           = var.vm_root_volume_size.api
+    volume_type           = var.vm_root_volume_type.api
+    delete_on_termination = true
+    encrypted             = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-rdb-api-${count.index}"
+    })
+  }
+
+  ebs_block_device {
+    device_name           = "/dev/xvda"
+    volume_size           = var.vm_data_volume_size.api
+    volume_type           = var.vm_data_volume_type.api
+    encrypted             = true
+    delete_on_termination = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-ebd-api-${count.index}"
+    })
+  }
+
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-api-${count.index}"
+  })
+}
diff --git a/deploy/terraform/aws/dolphinscheduler-master.tf b/deploy/terraform/aws/dolphinscheduler-master.tf
new file mode 100644
index 0000000000..2f52f3886a
--- /dev/null
+++ b/deploy/terraform/aws/dolphinscheduler-master.tf
@@ -0,0 +1,96 @@
+resource "aws_security_group" "master" {
+  name        = "master"
+  description = "Allow incoming connections"
+  vpc_id      = aws_vpc._.id
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow incoming SSH connections (Linux)"
+  }
+  ingress {
+    from_port       = 5678
+    to_port         = 5678
+    protocol        = "tcp"
+    security_groups = [aws_security_group.api.id]
+    description     = "Allow incoming HTTP connections"
+  }
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-sg"
+  })
+}
+
+resource "aws_security_group_rule" "master_worker" {
+  security_group_id        = aws_security_group.master.id
+  from_port                = 5678
+  to_port                  = 5678
+  protocol                 = "tcp"
+  type                     = "ingress"
+  source_security_group_id = aws_security_group.worker.id
+}
+
+data "template_file" "master_user_data" {
+  template = file("templates/cloud-init.yaml")
+  vars = {
+    "ssh_public_key"             = aws_key_pair.key_pair.public_key
+    "dolphinscheduler_version"   = var.ds_version
+    "dolphinscheduler_component" = "master-server"
+    "database_address"           = aws_db_instance.database.address
+    "database_port"              = aws_db_instance.database.port
+    "database_name"              = aws_db_instance.database.db_name
+    "database_username"          = aws_db_instance.database.username
+    "database_password"          = aws_db_instance.database.password
+    "zookeeper_connect_string"   = var.zookeeper_connect_string != "" ? var.zookeeper_connect_string : aws_instance.zookeeper[0].private_ip
+    "alert_server_host"          = ""
+    "s3_access_key_id"           = aws_iam_access_key.s3.id
+    "s3_secret_access_key"       = aws_iam_access_key.s3.secret
+    "s3_region"                  = var.aws_region
+    "s3_bucket_name"             = module.s3_bucket.s3_bucket_id
+    "s3_endpoint"                = ""
+  }
+}
+
+resource "aws_instance" "master" {
+  count = var.ds_component_replicas.master
+
+  ami                         = data.aws_ami.dolphinscheduler.id
+  instance_type               = var.vm_instance_type.master
+  subnet_id                   = aws_subnet.public[0].id
+  vpc_security_group_ids      = [aws_security_group.master.id]
+  source_dest_check           = false
+  associate_public_ip_address = var.vm_associate_public_ip_address.master
+
+  user_data = data.template_file.master_user_data.rendered
+
+  root_block_device {
+    volume_size           = var.vm_root_volume_size.master
+    volume_type           = var.vm_root_volume_type.master
+    delete_on_termination = true
+    encrypted             = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-rbd-master-${count.index}"
+    })
+  }
+
+  ebs_block_device {
+    device_name           = "/dev/xvda"
+    volume_size           = var.vm_data_volume_size.master
+    volume_type           = var.vm_data_volume_type.master
+    encrypted             = true
+    delete_on_termination = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-ebd-master-${count.index}"
+    })
+  }
+
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-master-${count.index}"
+  })
+}
diff --git a/deploy/terraform/aws/dolphinscheduler-output.tf b/deploy/terraform/aws/dolphinscheduler-output.tf
new file mode 100644
index 0000000000..95c7237d3e
--- /dev/null
+++ b/deploy/terraform/aws/dolphinscheduler-output.tf
@@ -0,0 +1,84 @@
+# Per-component instance attributes, exported as lists (one entry per replica).
+output "vm_server_instance_id" {
+  description = "Instance IDs of standalone instances"
+  value       = aws_instance.standalone_server[*].id
+}
+output "vm_server_instance_private_ip" {
+  description = "Private IPs of standalone instances"
+  value       = aws_instance.standalone_server[*].private_ip
+}
+output "vm_server_instance_public_dns" {
+  description = "Public domain names of standalone instances"
+  value       = aws_instance.standalone_server[*].public_dns
+}
+output "vm_server_instance_public_ip" {
+  description = "Public IPs of standalone instances"
+  value       = aws_instance.standalone_server[*].public_ip
+}
+
+output "master_server_instance_id" {
+  description = "Instance IDs of master instances"
+  value       = aws_instance.master[*].id
+}
+output "master_server_instance_private_ip" {
+  description = "Private IPs of master instances"
+  value       = aws_instance.master[*].private_ip
+}
+output "master_server_instance_public_dns" {
+  description = "Public domain names of master instances"
+  value       = aws_instance.master[*].public_dns
+}
+output "master_server_instance_public_ip" {
+  description = "Public IPs of master instances"
+  value       = aws_instance.master[*].public_ip
+}
+
+output "worker_server_instance_id" {
+  description = "Instance IDs of worker instances"
+  value       = aws_instance.worker[*].id
+}
+output "worker_server_instance_private_ip" {
+  description = "Private IPs of worker instances"
+  value       = aws_instance.worker[*].private_ip
+}
+output "worker_server_instance_public_dns" {
+  description = "Public domain names of worker instances"
+  value       = aws_instance.worker[*].public_dns
+}
+output "worker_server_instance_public_ip" {
+  description = "Public IPs of worker instances"
+  value       = aws_instance.worker[*].public_ip
+}
+
+output "api_server_instance_id" {
+  description = "Instance IDs of api instances"
+  value       = aws_instance.api[*].id
+}
+output "api_server_instance_private_ip" {
+  description = "Private IPs of api instances"
+  value       = aws_instance.api[*].private_ip
+}
+output "api_server_instance_public_dns" {
+  description = "Public domain names of api instances"
+  value       = aws_instance.api[*].public_dns
+}
+output "api_server_instance_public_ip" {
+  description = "Public IPs of api instances"
+  value       = aws_instance.api[*].public_ip
+}
+
+output "alert_server_instance_id" {
+  description = "Instance IDs of alert instances"
+  value       = aws_instance.alert[*].id
+}
+output "alert_server_instance_private_ip" {
+  description = "Private IPs of alert instances"
+  value       = aws_instance.alert[*].private_ip
+}
+output "alert_server_instance_public_dns" {
+  description = "Public domain names of alert instances"
+  value       = aws_instance.alert[*].public_dns
+}
+output "alert_server_instance_public_ip" {
+  description = "Public IPs of alert instances"
+  value       = aws_instance.alert[*].public_ip
+}
diff --git a/deploy/terraform/aws/dolphinscheduler-standalone.tf b/deploy/terraform/aws/dolphinscheduler-standalone.tf
new file mode 100644
index 0000000000..c9851ae834
--- /dev/null
+++ b/deploy/terraform/aws/dolphinscheduler-standalone.tf
@@ -0,0 +1,87 @@
+# Security group for the all-in-one standalone server.
+resource "aws_security_group" "standalone" {
+  name        = "standalone"
+  description = "Allow incoming connections"
+  vpc_id      = aws_vpc._.id
+  ingress {
+    from_port   = 12345
+    to_port     = 12345
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    # 12345 is the DolphinScheduler standalone server port, not generic HTTP.
+    description = "Allow incoming connections to the standalone server port"
+  }
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow incoming SSH connections (Linux)"
+  }
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  tags = merge(var.tags, {
+    # Include the component in the Name tag, consistent with the worker/master SGs.
+    "Name" = "${var.name_prefix}-standalone-sg"
+  })
+}
+
+# Renders the cloud-init user data for the standalone server.
+# NOTE(review): the template_file data source is superseded by the built-in
+# templatefile() function on current Terraform — consider migrating.
+data "template_file" "standalone_user_data" {
+  template = file("templates/cloud-init.yaml")
+  vars = {
+    "ssh_public_key"             = aws_key_pair.key_pair.public_key
+    "dolphinscheduler_version"   = var.ds_version
+    "dolphinscheduler_component" = "standalone-server"
+    "database_address"           = aws_db_instance.database.address
+    "database_port"              = aws_db_instance.database.port
+    "database_name"              = aws_db_instance.database.db_name
+    "database_username"          = aws_db_instance.database.username
+    "database_password"          = aws_db_instance.database.password
+    # Left empty for standalone mode — presumably no external registry or
+    # alert server is needed; confirm against the component's defaults.
+    "zookeeper_connect_string"   = ""
+    "alert_server_host"          = ""
+    "s3_access_key_id"           = aws_iam_access_key.s3.id
+    "s3_secret_access_key"       = aws_iam_access_key.s3.secret
+    "s3_region"                  = var.aws_region
+    "s3_bucket_name"             = module.s3_bucket.s3_bucket_id
+    # Empty endpoint — cloud-init substitutes it into common.properties as-is.
+    "s3_endpoint"                = ""
+  }
+}
+
+# EC2 instances running the all-in-one DolphinScheduler standalone server.
+resource "aws_instance" "standalone_server" {
+  count = var.ds_component_replicas.standalone_server
+
+  ami                         = data.aws_ami.dolphinscheduler.id
+  instance_type               = var.vm_instance_type.standalone_server
+  subnet_id                   = aws_subnet.public[0].id
+  vpc_security_group_ids      = [aws_security_group.standalone.id]
+  source_dest_check           = false
+  associate_public_ip_address = var.vm_associate_public_ip_address.standalone_server
+
+  user_data = data.template_file.standalone_user_data.rendered
+
+  root_block_device {
+    volume_size           = var.vm_root_volume_size.standalone_server
+    volume_type           = var.vm_root_volume_type.standalone_server
+    delete_on_termination = true
+    encrypted             = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-rbd-standalone-${count.index}"
+    })
+  }
+
+  # Extra data volume. /dev/xvda is the root device on Amazon Linux, so the
+  # additional EBS volume must use a different device name or the instance
+  # fails to launch with a device-name conflict.
+  ebs_block_device {
+    device_name           = "/dev/xvdb"
+    volume_size           = var.vm_data_volume_size.standalone_server
+    volume_type           = var.vm_data_volume_type.standalone_server
+    encrypted             = true
+    delete_on_termination = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-ebd-standalone-${count.index}"
+    })
+  }
+
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-standalone-${count.index}"
+  })
+}
diff --git a/deploy/terraform/aws/dolphinscheduler-variables.tf b/deploy/terraform/aws/dolphinscheduler-variables.tf
new file mode 100644
index 0000000000..0a37e72424
--- /dev/null
+++ b/deploy/terraform/aws/dolphinscheduler-variables.tf
@@ -0,0 +1,97 @@
+# DolphinScheduler release to install on each instance.
+variable "ds_version" {
+  type        = string
+  description = "DolphinScheduler Version"
+  default     = "3.1.1"
+}
+
+# Must match the ami_name used when baking the image with Packer
+# (see packer/ds-ami-*.pkr.hcl); looked up in os-versions.tf.
+variable "ds_ami_name" {
+  type        = string
+  description = "Name of DolphinScheduler AMI"
+  default     = "dolphinscheduler-ami"
+}
+
+# Number of EC2 instances per component. The defaults deploy one of each
+# clustered component and no standalone server.
+variable "ds_component_replicas" {
+  type        = map(number)
+  description = "Replicas of the DolphinScheduler Components"
+  default = {
+    master            = 1
+    worker            = 1
+    alert             = 1
+    api               = 1
+    standalone_server = 0
+  }
+}
+
+## VM settings
+
+variable "vm_instance_type" {
+  type        = map(string)
+  description = "EC2 instance type"
+  default = {
+    master            = "t2.medium"
+    worker            = "t2.medium"
+    alert             = "t2.micro"
+    api               = "t2.small"
+    standalone_server = "t2.small"
+  }
+}
+
+variable "vm_associate_public_ip_address" {
+  type        = map(bool)
+  description = "Associate a public IP address to the EC2 instance"
+  default = {
+    master            = true
+    worker            = true
+    alert             = true
+    api               = true
+    standalone_server = true
+  }
+}
+
+# Sizes are in GiB.
+variable "vm_root_volume_size" {
+  type        = map(number)
+  description = "Root Volume size of the EC2 Instance"
+  default = {
+    master            = 30
+    worker            = 30
+    alert             = 30
+    api               = 30
+    standalone_server = 30
+  }
+}
+
+variable "vm_data_volume_size" {
+  type        = map(number)
+  description = "Data volume size of the EC2 Instance"
+  default = {
+    master            = 10
+    worker            = 10
+    alert             = 10
+    api               = 10
+    standalone_server = 10
+  }
+}
+
+variable "vm_root_volume_type" {
+  type        = map(string)
+  description = "Root volume type of the EC2 Instance"
+  default = {
+    master            = "gp2"
+    worker            = "gp2"
+    alert             = "gp2"
+    api               = "gp2"
+    standalone_server = "gp2"
+  }
+}
+
+variable "vm_data_volume_type" {
+  type        = map(string)
+  description = "Data volume type of the EC2 Instance"
+  default = {
+    master            = "gp2"
+    worker            = "gp2"
+    alert             = "gp2"
+    api               = "gp2"
+    standalone_server = "gp2"
+  }
+}
diff --git a/deploy/terraform/aws/dolphinscheduler-worker.tf b/deploy/terraform/aws/dolphinscheduler-worker.tf
new file mode 100644
index 0000000000..4f0270cf36
--- /dev/null
+++ b/deploy/terraform/aws/dolphinscheduler-worker.tf
@@ -0,0 +1,91 @@
+locals {
+  # Host the workers report alerts to: prefer a dedicated alert server, fall
+  # back to the standalone server, and default to empty when neither component
+  # is deployed (the original indexed standalone_server[0] unconditionally,
+  # which fails when both replica counts are 0).
+  alert_server_ip = var.ds_component_replicas.alert > 0 ? aws_instance.alert[0].private_ip : (var.ds_component_replicas.standalone_server > 0 ? aws_instance.standalone_server[0].private_ip : "")
+}
+
+# Security group for worker servers; only master and API servers may reach
+# the worker port.
+resource "aws_security_group" "worker" {
+  name        = "worker_server_sg"
+  description = "Allow incoming connections"
+  vpc_id      = aws_vpc._.id
+  ingress {
+    from_port       = 1234
+    to_port         = 1234
+    protocol        = "tcp"
+    security_groups = [aws_security_group.master.id, aws_security_group.api.id]
+    # Port 1234 is the worker's listen port, not HTTP; access is limited to
+    # the master and API security groups above.
+    description     = "Allow master and API servers to connect to the worker port"
+  }
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow incoming SSH connections (Linux)"
+  }
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-worker-sg"
+  })
+}
+
+# Renders the cloud-init user data for worker servers.
+# NOTE(review): the template_file data source is superseded by the built-in
+# templatefile() function on current Terraform — consider migrating.
+data "template_file" "worker_user_data" {
+  template = file("templates/cloud-init.yaml")
+  vars = {
+    "ssh_public_key"             = aws_key_pair.key_pair.public_key
+    "dolphinscheduler_version"   = var.ds_version
+    "dolphinscheduler_component" = "worker-server"
+    "database_address"           = aws_db_instance.database.address
+    "database_port"              = aws_db_instance.database.port
+    "database_name"              = aws_db_instance.database.db_name
+    "database_username"          = aws_db_instance.database.username
+    "database_password"          = aws_db_instance.database.password
+    # Use the externally supplied connect string when given, otherwise the
+    # self-managed ZooKeeper instance (see zookeeper-main.tf).
+    "zookeeper_connect_string"   = var.zookeeper_connect_string != "" ? var.zookeeper_connect_string : aws_instance.zookeeper[0].private_ip
+    "alert_server_host"          = local.alert_server_ip
+    "s3_access_key_id"           = aws_iam_access_key.s3.id
+    "s3_secret_access_key"       = aws_iam_access_key.s3.secret
+    "s3_region"                  = var.aws_region
+    "s3_bucket_name"             = module.s3_bucket.s3_bucket_id
+    "s3_endpoint"                = ""
+  }
+}
+
+# EC2 instances running the DolphinScheduler worker-server component.
+resource "aws_instance" "worker" {
+  count = var.ds_component_replicas.worker
+
+  ami                         = data.aws_ami.dolphinscheduler.id
+  instance_type               = var.vm_instance_type.worker
+  subnet_id                   = aws_subnet.public[0].id
+  vpc_security_group_ids      = [aws_security_group.worker.id]
+  source_dest_check           = false
+  associate_public_ip_address = var.vm_associate_public_ip_address.worker
+
+  user_data = data.template_file.worker_user_data.rendered
+
+  root_block_device {
+    volume_size           = var.vm_root_volume_size.worker
+    volume_type           = var.vm_root_volume_type.worker
+    delete_on_termination = true
+    encrypted             = true
+    tags = merge(var.tags, {
+      # Include component and index so volumes are distinguishable,
+      # consistent with the master/standalone naming scheme.
+      "Name" = "${var.name_prefix}-rbd-worker-${count.index}"
+    })
+  }
+
+  # Extra data volume. /dev/xvda is the root device on Amazon Linux, so the
+  # additional EBS volume must use a different device name or the instance
+  # fails to launch with a device-name conflict.
+  ebs_block_device {
+    device_name           = "/dev/xvdb"
+    volume_size           = var.vm_data_volume_size.worker
+    volume_type           = var.vm_data_volume_type.worker
+    encrypted             = true
+    delete_on_termination = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-ebd-worker-${count.index}"
+    })
+  }
+
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-worker-${count.index}"
+  })
+}
diff --git a/deploy/terraform/aws/key-pair-main.tf b/deploy/terraform/aws/key-pair-main.tf
new file mode 100644
index 0000000000..382972cc76
--- /dev/null
+++ b/deploy/terraform/aws/key-pair-main.tf
@@ -0,0 +1,18 @@
+# Generates a secure private key and encodes it as PEM
+resource "tls_private_key" "key_pair" {
+  algorithm = "RSA"
+  rsa_bits  = 4096
+}
+
+# Create the Key Pair
+resource "aws_key_pair" "key_pair" {
+  key_name   = "dolphinscheduler"
+  public_key = tls_private_key.key_pair.public_key_openssh
+}
+
+# Save the private key next to the Terraform config so operators can SSH in.
+# NOTE(review): the key material is also stored in Terraform state in
+# plaintext — keep the state file protected.
+resource "local_file" "ssh_key" {
+  filename        = "${aws_key_pair.key_pair.key_name}.pem"
+  content         = tls_private_key.key_pair.private_key_pem
+  # 0600, not 0700: a private key file must be owner read/write only and
+  # never executable (ssh refuses keys with loose permissions anyway).
+  file_permission = "0600"
+}
diff --git a/deploy/terraform/aws/network-main.tf b/deploy/terraform/aws/network-main.tf
new file mode 100644
index 0000000000..3b68d36b15
--- /dev/null
+++ b/deploy/terraform/aws/network-main.tf
@@ -0,0 +1,68 @@
+# VPC hosting all DolphinScheduler components.
+resource "aws_vpc" "_" {
+  cidr_block           = var.vpc_cidr
+  enable_dns_hostnames = true
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-vpc"
+  })
+}
+
+resource "aws_internet_gateway" "_" {
+  vpc_id = aws_vpc._.id
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-ig"
+  })
+}
+
+# Public subnets, one per subnet_count.public, spread across availability zones.
+resource "aws_subnet" "public" {
+  count             = var.subnet_count.public
+  vpc_id            = aws_vpc._.id
+  cidr_block        = var.public_subnet_cidr_blocks[count.index]
+  availability_zone = data.aws_availability_zones.available.names[count.index]
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-public-subnet-${count.index}"
+  })
+}
+
+# Default route for public subnets through the internet gateway.
+resource "aws_route_table" "public" {
+  vpc_id = aws_vpc._.id
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway._.id
+  }
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-public-rt"
+  })
+}
+
+resource "aws_route_table_association" "public" {
+  count          = var.subnet_count.public
+  subnet_id      = aws_subnet.public[count.index].id
+  route_table_id = aws_route_table.public.id
+}
+
+# Private subnets; the RDS subnet group lives here (see rds-main.tf).
+resource "aws_subnet" "private" {
+  count             = var.subnet_count.private
+  vpc_id            = aws_vpc._.id
+  cidr_block        = var.private_subnet_cidr_blocks[count.index]
+  availability_zone = data.aws_availability_zones.available.names[count.index]
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-private-subnet-${count.index}"
+  })
+}
+
+# NOTE(review): this "private" route table also default-routes through the
+# internet gateway, so the private subnets are not actually isolated. For
+# real isolation the default route should go through a NAT gateway (or be
+# absent) — confirm whether this is intentional for a test deployment.
+resource "aws_route_table" "private" {
+  vpc_id = aws_vpc._.id
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway._.id
+  }
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-private-rt"
+  })
+}
+
+resource "aws_route_table_association" "private" {
+  count          = var.subnet_count.private
+  subnet_id      = aws_subnet.private[count.index].id
+  route_table_id = aws_route_table.private.id
+}
diff --git a/deploy/terraform/aws/network-variables.tf b/deploy/terraform/aws/network-variables.tf
new file mode 100644
index 0000000000..d901ba9c95
--- /dev/null
+++ b/deploy/terraform/aws/network-variables.tf
@@ -0,0 +1,37 @@
+# VPC Variables
+variable "vpc_cidr" {
+  type        = string
+  description = "CIDR for the VPC"
+  default     = "10.0.0.0/16"
+}
+
+# Each count must not exceed the length of the matching CIDR list below,
+# nor the number of availability zones in the chosen region (subnets index
+# into data.aws_availability_zones.available by count.index).
+variable "subnet_count" {
+  description = "Number of subnets"
+  type        = map(number)
+  default = {
+    public  = 1,
+    private = 2
+  }
+}
+
+variable "public_subnet_cidr_blocks" {
+  type        = list(string)
+  description = "CIDR blocks for the public subnets"
+  default = [
+    "10.0.1.0/24",
+    "10.0.2.0/24",
+    "10.0.3.0/24",
+    "10.0.4.0/24"
+  ]
+}
+
+variable "private_subnet_cidr_blocks" {
+  description = "Available CIDR blocks for private subnets"
+  type        = list(string)
+  default = [
+    "10.0.101.0/24",
+    "10.0.102.0/24",
+    "10.0.103.0/24",
+    "10.0.104.0/24",
+  ]
+}
diff --git a/deploy/terraform/aws/os-versions.tf b/deploy/terraform/aws/os-versions.tf
new file mode 100644
index 0000000000..5c879c1481
--- /dev/null
+++ b/deploy/terraform/aws/os-versions.tf
@@ -0,0 +1,29 @@
+# Latest Amazon Linux 2022 x86_64 HVM image; used as the base OS for the
+# self-managed ZooKeeper instance (see zookeeper-main.tf).
+data "aws_ami" "amazon-linux" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name   = "architecture"
+    values = ["x86_64"]
+  }
+
+  filter {
+    name   = "name"
+    values = ["al2022-ami-*"]
+  }
+}
+
+# AMI baked by the Packer templates under packer/, looked up by the name
+# configured in var.ds_ami_name. "self" restricts the search to images owned
+# by this AWS account.
+data "aws_ami" "dolphinscheduler" {
+  most_recent = true
+  owners      = ["self"]
+
+  filter {
+    name   = "name"
+    values = [var.ds_ami_name]
+  }
+}
diff --git a/deploy/terraform/aws/packer/ds-ami-local.pkr.hcl b/deploy/terraform/aws/packer/ds-ami-local.pkr.hcl
new file mode 100644
index 0000000000..0eaef622c8
--- /dev/null
+++ b/deploy/terraform/aws/packer/ds-ami-local.pkr.hcl
@@ -0,0 +1,76 @@
+# Packer template that bakes the DolphinScheduler AMI from a locally built
+# distribution tarball (var.ds_tar) — use ds-ami-official.pkr.hcl to build
+# from an official Apache release instead.
+variable "aws_access_key" {
+  type        = string
+  description = "AWS access key"
+}
+
+variable "aws_secret_key" {
+  type        = string
+  description = "AWS secret key"
+}
+
+variable "aws_region" {
+  type        = string
+  description = "AWS region"
+  default     = "cn-north-1"
+}
+
+variable "ds_tar" {
+  type        = string
+  description = "DolphinScheduler tar file location"
+}
+
+# Must match var.ds_ami_name in the Terraform config so the AMI lookup in
+# os-versions.tf finds this image.
+variable "ds_ami_name" {
+  type        = string
+  description = "Name of DolphinScheduler AMI"
+  default     = "dolphinscheduler-ami"
+}
+
+packer {
+  required_plugins {
+    amazon = {
+      version = ">= 0.0.1"
+      source  = "github.com/hashicorp/amazon"
+    }
+  }
+}
+
+# Base the image on the latest Amazon Linux 2022 x86_64 AMI.
+source "amazon-ebs" "linux" {
+  access_key    = var.aws_access_key
+  secret_key    = var.aws_secret_key
+  region        = var.aws_region
+  ami_name      = var.ds_ami_name
+  instance_type = "t2.micro"
+  source_ami_filter {
+    filters = {
+      name                = "al2022-ami-*"
+      root-device-type    = "ebs"
+      virtualization-type = "hvm"
+      architecture        = "x86_64"
+    }
+    most_recent = true
+    owners      = ["amazon"]
+  }
+  ssh_username = "ec2-user"
+}
+
+build {
+  name    = "dolphinscheduler-ami"
+  sources = ["source.amazon-ebs.linux"]
+
+  # ~ resolves to /home/ec2-user (the ssh_username above), matching the
+  # absolute path used in the shell provisioner below.
+  provisioner "file" {
+    source      = var.ds_tar
+    destination = "~/dolphinscheduler.tar.gz"
+  }
+
+  # Install Java 8 (Corretto) and unpack the distribution to /opt/dolphinscheduler.
+  provisioner "shell" {
+    inline = [
+      "sudo yum remove -y java",
+      "sudo yum install -y java-1.8.0-amazon-corretto.x86_64",
+      "echo 'export JAVA_HOME=/etc/alternatives/jre' | sudo tee /etc/profile.d/java_home.sh",
+      "sudo mkdir -p /opt/dolphinscheduler",
+      "sudo tar zxvf /home/ec2-user/dolphinscheduler.tar.gz --strip-components 1 -C /opt/dolphinscheduler",
+      "sudo find /opt/dolphinscheduler/ -name start.sh | xargs -I{} sudo chmod +x {}",
+    ]
+  }
+
+}
diff --git a/deploy/terraform/aws/packer/ds-ami-official.pkr.hcl b/deploy/terraform/aws/packer/ds-ami-official.pkr.hcl
new file mode 100644
index 0000000000..3242a951fd
--- /dev/null
+++ b/deploy/terraform/aws/packer/ds-ami-official.pkr.hcl
@@ -0,0 +1,73 @@
+# Packer template that bakes the DolphinScheduler AMI from an official Apache
+# release (var.ds_version) — use ds-ami-local.pkr.hcl to build from a local
+# tarball instead.
+variable "aws_access_key" {
+  type        = string
+  description = "AWS access key"
+}
+
+variable "aws_secret_key" {
+  type        = string
+  description = "AWS secret key"
+}
+
+variable "aws_region" {
+  type        = string
+  description = "AWS region"
+  default     = "cn-north-1"
+}
+
+variable "ds_version" {
+  type        = string
+  description = "DolphinScheduler Version"
+  default     = "3.1.1"
+}
+
+# Must match var.ds_ami_name in the Terraform config so the AMI lookup in
+# os-versions.tf finds this image.
+variable "ds_ami_name" {
+  type        = string
+  description = "Name of DolphinScheduler AMI"
+  default     = "dolphinscheduler-ami"
+}
+
+packer {
+  required_plugins {
+    amazon = {
+      version = ">= 0.0.1"
+      source  = "github.com/hashicorp/amazon"
+    }
+  }
+}
+
+# Base the image on the latest Amazon Linux 2022 x86_64 AMI.
+source "amazon-ebs" "linux" {
+  access_key    = var.aws_access_key
+  secret_key    = var.aws_secret_key
+  region        = var.aws_region
+  ami_name      = var.ds_ami_name
+  instance_type = "t2.micro"
+  source_ami_filter {
+    filters = {
+      name                = "al2022-ami-*"
+      root-device-type    = "ebs"
+      virtualization-type = "hvm"
+      architecture        = "x86_64"
+    }
+    most_recent = true
+    owners      = ["amazon"]
+  }
+  ssh_username = "ec2-user"
+}
+
+# Install Java 8 (Corretto), download the official release, and unpack it to
+# /opt/dolphinscheduler.
+build {
+  name    = "dolphinscheduler-ami"
+  sources = [
+    "source.amazon-ebs.linux"
+  ]
+
+  provisioner "shell" {
+    inline = [
+      "sudo yum remove -y java",
+      "sudo yum install -y java-1.8.0-amazon-corretto.x86_64",
+      "echo 'export JAVA_HOME=/etc/alternatives/jre' | sudo tee /etc/profile.d/java_home.sh",
+      "sudo mkdir -p /opt/dolphinscheduler",
+      # HTTPS, not HTTP: the release tarball must not be fetchable over a
+      # tamperable plain-text connection.
+      "curl -Ls https://archive.apache.org/dist/dolphinscheduler/${var.ds_version}/apache-dolphinscheduler-${var.ds_version}-bin.tar.gz | sudo tar zxvf - --strip-components 1 -C /opt/dolphinscheduler",
+      "sudo find /opt/dolphinscheduler/ -name start.sh | xargs -I{} sudo chmod +x {}",
+    ]
+  }
+}
diff --git a/deploy/terraform/aws/provider-main.tf b/deploy/terraform/aws/provider-main.tf
new file mode 100644
index 0000000000..3ab0d82e89
--- /dev/null
+++ b/deploy/terraform/aws/provider-main.tf
@@ -0,0 +1,9 @@
+provider "aws" {
+  # NOTE(review): static credentials passed via variables end up in plan
+  # files/state; consider the AWS shared config or environment variables
+  # instead — confirm with the team.
+  access_key = var.aws_access_key
+  secret_key = var.aws_secret_key
+  region     = var.aws_region
+}
+
+# All usable AZs in the configured region; subnets index into this list.
+data "aws_availability_zones" "available" {
+  state = "available"
+}
diff --git a/deploy/terraform/aws/provider-variables.tf b/deploy/terraform/aws/provider-variables.tf
new file mode 100644
index 0000000000..dd4a107fbc
--- /dev/null
+++ b/deploy/terraform/aws/provider-variables.tf
@@ -0,0 +1,29 @@
+variable "aws_access_key" {
+  type        = string
+  description = "AWS access key"
+}
+
+variable "aws_secret_key" {
+  type        = string
+  description = "AWS secret key"
+  # Marked sensitive so Terraform redacts the value from plan/apply output.
+  sensitive   = true
+}
+
+variable "aws_region" {
+  type        = string
+  description = "AWS region"
+  default     = "cn-north-1"
+}
+
+# Prepended to the Name tag of every resource created by this config.
+variable "name_prefix" {
+  type        = string
+  description = "Name prefix for all resources"
+  default     = "dolphinscheduler"
+}
+
+variable "tags" {
+  type        = map(string)
+  description = "Tags to apply to all resources"
+  default = {
+    "Deployment" = "Test"
+  }
+}
diff --git a/deploy/terraform/aws/rds-main.tf b/deploy/terraform/aws/rds-main.tf
new file mode 100644
index 0000000000..bbf74fbd14
--- /dev/null
+++ b/deploy/terraform/aws/rds-main.tf
@@ -0,0 +1,37 @@
+resource "aws_security_group" "database_sg" {
+  name        = "dolphinscheduler-database"
+  vpc_id      = aws_vpc._.id
+  # The old description claimed "all inbound", but ingress is restricted to
+  # the DolphinScheduler component security groups below.
+  description = "Allow DolphinScheduler components to connect to Postgres"
+  ingress {
+    from_port = 5432
+    to_port   = 5432
+    protocol  = "tcp"
+    security_groups = [
+      aws_security_group.master.id,
+      aws_security_group.worker.id,
+      aws_security_group.alert.id,
+      aws_security_group.api.id,
+      aws_security_group.standalone.id
+    ]
+  }
+}
+
+# Subnet group spanning the private subnets; RDS places the instance here.
+resource "aws_db_subnet_group" "database_subnet_group" {
+  name       = "dolphinscheduler-database_subnet_group"
+  subnet_ids = [for subnet in aws_subnet.private : subnet.id]
+}
+
+# PostgreSQL instance backing all DolphinScheduler components.
+# NOTE(review): publicly_accessible = true on a database placed in the
+# "private" subnets (which also have an IGW default route, see
+# network-main.tf) likely makes it reachable from the internet — confirm
+# this is intentional for a test deployment. The password is also stored in
+# Terraform state in plaintext.
+resource "aws_db_instance" "database" {
+  identifier             = "dolphinscheduler"
+  db_name                = "dolphinscheduler"
+  instance_class         = var.db_instance_class
+  allocated_storage      = 5
+  engine                 = "postgres"
+  engine_version         = "14.5"
+  skip_final_snapshot    = true
+  db_subnet_group_name   = aws_db_subnet_group.database_subnet_group.id
+  publicly_accessible    = true
+  vpc_security_group_ids = [aws_security_group.database_sg.id]
+  username               = var.db_username
+  password               = var.db_password
+}
diff --git a/deploy/terraform/aws/rds-output.tf b/deploy/terraform/aws/rds-output.tf
new file mode 100644
index 0000000000..66d5281298
--- /dev/null
+++ b/deploy/terraform/aws/rds-output.tf
@@ -0,0 +1,14 @@
+# Connection details of the RDS instance backing DolphinScheduler.
+output "db_address" {
+  description = "Database address"
+  value       = aws_db_instance.database.address
+}
+
+output "db_port" {
+  description = "Database port"
+  value       = aws_db_instance.database.port
+}
+
+output "db_name" {
+  description = "Database name"
+  value       = aws_db_instance.database.db_name
+}
diff --git a/deploy/terraform/aws/rds-variables.tf b/deploy/terraform/aws/rds-variables.tf
new file mode 100644
index 0000000000..9c3e6a2db9
--- /dev/null
+++ b/deploy/terraform/aws/rds-variables.tf
@@ -0,0 +1,13 @@
+variable "db_password" {
+  description = "Database password"
+  type        = string
+  # Redact the password from plan/apply output.
+  sensitive   = true
+}
+variable "db_username" {
+  description = "Database username"
+  type        = string
+  default     = "dolphinscheduler"
+}
+variable "db_instance_class" {
+  description = "Database instance class"
+  # Explicit type for consistency with the other variables in this module.
+  type        = string
+  default     = "db.t3.micro"
+}
diff --git a/deploy/terraform/aws/s3-main.tf b/deploy/terraform/aws/s3-main.tf
new file mode 100644
index 0000000000..574c7f6926
--- /dev/null
+++ b/deploy/terraform/aws/s3-main.tf
@@ -0,0 +1,53 @@
+# Bucket used as DolphinScheduler resource storage (wired into each
+# instance's common.properties by templates/cloud-init.yaml).
+module "s3_bucket" {
+  source  = "terraform-aws-modules/s3-bucket/aws"
+  version = "~> 3.6"
+
+  bucket_prefix = var.s3_bucket_prefix
+  acl           = "private"
+  # force_destroy lets `terraform destroy` remove a non-empty bucket —
+  # acceptable for a test deployment.
+  force_destroy = true
+  attach_policy = true
+  policy        = data.aws_iam_policy_document.s3.json
+}
+
+# Dedicated IAM user whose access key is injected into every instance.
+resource "aws_iam_user" "s3" {
+  name = "${var.name_prefix}-s3"
+  path = "/dolphinscheduler/"
+}
+
+# NOTE(review): the secret access key is stored in Terraform state in
+# plaintext — keep the state file protected.
+resource "aws_iam_access_key" "s3" {
+  user = aws_iam_user.s3.name
+}
+
+# Bucket policy granting the IAM user full access to this bucket only.
+data "aws_iam_policy_document" "s3" {
+  statement {
+    principals {
+      type        = "AWS"
+      identifiers = [aws_iam_user.s3.arn]
+    }
+
+    actions = ["s3:*"]
+
+    resources = [
+      "${module.s3_bucket.s3_bucket_arn}",
+      "${module.s3_bucket.s3_bucket_arn}/*"
+    ]
+  }
+}
+
+# Identity policy for the DolphinScheduler S3 user.
+resource "aws_iam_user_policy" "s3" {
+  name = "${var.name_prefix}-s3"
+  user = aws_iam_user.s3.name
+
+  # Least privilege: the original granted s3:* on Resource "*", i.e. every
+  # bucket in the account. Scope it to the DolphinScheduler bucket, matching
+  # the bucket policy in data.aws_iam_policy_document.s3.
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = [
+          "s3:*",
+        ]
+        Effect = "Allow"
+        Resource = [
+          module.s3_bucket.s3_bucket_arn,
+          "${module.s3_bucket.s3_bucket_arn}/*",
+        ]
+      },
+    ]
+  })
+}
diff --git a/deploy/terraform/aws/s3-outputs.tf b/deploy/terraform/aws/s3-outputs.tf
new file mode 100644
index 0000000000..d9c9a613ee
--- /dev/null
+++ b/deploy/terraform/aws/s3-outputs.tf
@@ -0,0 +1,25 @@
+# Bucket and credential details consumed by the DolphinScheduler instances.
+output "s3_address" {
+  description = "S3 address"
+  value       = module.s3_bucket.s3_bucket_bucket_domain_name
+}
+
+output "s3_access_key" {
+  description = "S3 access key"
+  value       = aws_iam_access_key.s3.id
+}
+
+output "s3_secret" {
+  description = "S3 access secret"
+  value       = aws_iam_access_key.s3.secret
+  sensitive   = true
+}
+
+output "s3_bucket" {
+  description = "S3 bucket name"
+  value       = module.s3_bucket.s3_bucket_id
+}
+
+output "s3_regional_domain_name" {
+  description = "S3 regional domain name"
+  value       = module.s3_bucket.s3_bucket_bucket_regional_domain_name
+}
diff --git a/deploy/terraform/aws/s3-variables.tf b/deploy/terraform/aws/s3-variables.tf
new file mode 100644
index 0000000000..8230cae00f
--- /dev/null
+++ b/deploy/terraform/aws/s3-variables.tf
@@ -0,0 +1,4 @@
+variable "s3_bucket_prefix" {
+  type        = string
+  # Description added for consistency with the other variable files.
+  description = "Prefix for the auto-generated name of the S3 bucket used as DolphinScheduler resource storage"
+  default     = "dolphinscheduler-test-"
+}
diff --git a/deploy/terraform/aws/templates/cloud-init.yaml b/deploy/terraform/aws/templates/cloud-init.yaml
new file mode 100644
index 0000000000..218379f1a3
--- /dev/null
+++ b/deploy/terraform/aws/templates/cloud-init.yaml
@@ -0,0 +1,72 @@
+#cloud-config
+
+# Cloud-init template rendered per component by the template_file data
+# sources; ${...} placeholders are substituted by Terraform before boot.
+
+groups:
+  - ds
+
+users:
+  - name: ds
+    shell: /bin/bash
+    primary_group: ds
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    ssh_authorized_keys:
+      - ${ssh_public_key}
+  - name: root
+    ssh_authorized_keys:
+      - ${ssh_public_key}
+
+write_files:
+  - path: /etc/systemd/system/dolphinscheduler-schema.service
+    content: |
+      [Unit]
+      Description=DolphinScheduler service to init database schema
+
+      [Service]
+      Type=oneshot
+      RemainAfterExit=yes
+      Environment="DATABASE=postgresql"
+      Environment="SPRING_PROFILES_ACTIVE=postgresql"
+      Environment="SPRING_DATASOURCE_URL=jdbc:postgresql://${database_address}:${database_port}/${database_name}"
+      Environment="SPRING_DATASOURCE_USERNAME=${database_username}"
+      Environment="SPRING_DATASOURCE_PASSWORD=${database_password}"
+      User=ds
+      WorkingDirectory=/opt/dolphinscheduler
+      ExecStart=bash -l /opt/dolphinscheduler/tools/bin/upgrade-schema.sh
+
+      [Install]
+      WantedBy=multi-user.target
+  - path: /etc/systemd/system/dolphinscheduler.service
+    content: |
+      [Unit]
+      Description=DolphinScheduler Service ${dolphinscheduler_component}
+      Requires=dolphinscheduler-schema.service
+
+      [Service]
+      Environment="DATABASE=postgresql"
+      Environment="SPRING_PROFILES_ACTIVE=postgresql"
+      Environment="SPRING_DATASOURCE_URL=jdbc:postgresql://${database_address}:${database_port}/${database_name}"
+      Environment="SPRING_DATASOURCE_USERNAME=${database_username}"
+      Environment="SPRING_DATASOURCE_PASSWORD=${database_password}"
+      Environment="REGISTRY_ZOOKEEPER_CONNECT_STRING=${zookeeper_connect_string}"
+      Environment="WORKER_ALERT_LISTEN_HOST=${alert_server_host}"
+      User=ds
+      WorkingDirectory=/opt/dolphinscheduler
+      ExecStart=bash -l /opt/dolphinscheduler/${dolphinscheduler_component}/bin/start.sh
+      Restart=always
+
+      [Install]
+      WantedBy=multi-user.target
+
+packages: []
+
+# Patch S3 settings into every common.properties, then start the services.
+# "sed -i -e", not "sed -ie": GNU sed parses "-ie" as -i with backup suffix
+# "e", which litters the config dirs with *.propertiese backup files.
+runcmd:
+  - chown -R ds:ds /opt/dolphinscheduler
+  - find /opt/dolphinscheduler/ -name "start.sh" | xargs -I{} chmod +x {}
+  - find /opt/dolphinscheduler/ -name "common.properties" | xargs -I{} sed -i -e "s/^resource.storage.type=.*/resource.storage.type=S3/g" {}
+  - find /opt/dolphinscheduler/ -name "common.properties" | xargs -I{} sed -i -e "s/^resource.aws.access.key.id=.*/resource.aws.access.key.id=${s3_access_key_id}/g" {}
+  - find /opt/dolphinscheduler/ -name "common.properties" | xargs -I{} sed -i -e "s:^resource.aws.secret.access.key=.*:resource.aws.secret.access.key=${s3_secret_access_key}:g" {}
+  - find /opt/dolphinscheduler/ -name "common.properties" | xargs -I{} sed -i -e "s/^resource.aws.region=.*/resource.aws.region=${s3_region}/g" {}
+  - find /opt/dolphinscheduler/ -name "common.properties" | xargs -I{} sed -i -e "s/^resource.aws.s3.bucket.name=.*/resource.aws.s3.bucket.name=${s3_bucket_name}/g" {}
+  - find /opt/dolphinscheduler/ -name "common.properties" | xargs -I{} sed -i -e "s/^resource.aws.s3.endpoint=.*/resource.aws.s3.endpoint=${s3_endpoint}/g" {}
+  - systemctl enable dolphinscheduler
+  - systemctl start dolphinscheduler-schema
+  - systemctl start dolphinscheduler
diff --git a/deploy/terraform/aws/templates/zookeeper/cloud-init.yaml b/deploy/terraform/aws/templates/zookeeper/cloud-init.yaml
new file mode 100644
index 0000000000..7f37566e5c
--- /dev/null
+++ b/deploy/terraform/aws/templates/zookeeper/cloud-init.yaml
@@ -0,0 +1,29 @@
+#cloud-config
+
+# Cloud-init for the self-managed ZooKeeper host (see zookeeper-main.tf).
+
+package_update: true
+package_upgrade: true
+
+groups:
+  - docker
+
+users:
+  - name: docker
+    shell: /bin/bash
+    primary_group: docker
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    ssh_authorized_keys:
+      - ${ssh_public_key}
+  - name: root
+    ssh_authorized_keys:
+      - ${ssh_public_key}
+
+system_info:
+  default_user:
+    groups: [docker]
+
+packages:
+  - docker
+
+runcmd:
+  - sudo systemctl enable docker
+  - sudo systemctl start docker
+  # Previously nothing listened on 2181 even though the security group opens
+  # that port and the components' registry connect string points at this
+  # host; actually run ZooKeeper.
+  - sudo docker run -d --name zookeeper --restart always -p 2181:2181 zookeeper:3.7
diff --git a/deploy/terraform/aws/zookeeper-main.tf b/deploy/terraform/aws/zookeeper-main.tf
new file mode 100644
index 0000000000..37e953939e
--- /dev/null
+++ b/deploy/terraform/aws/zookeeper-main.tf
@@ -0,0 +1,96 @@
+# Security group for the demo single-node ZooKeeper instance. Only created
+# when no external ZooKeeper is supplied via var.zookeeper_connect_string
+# (count = 0 otherwise).
+resource "aws_security_group" "zookeeper_sg" {
+  count = var.zookeeper_connect_string != "" ? 0 : 1
+
+  name        = "zookeeper_sg"
+  description = "Allow incoming connections"
+  vpc_id      = aws_vpc._.id
+  # ZooKeeper client port (2181), reachable only from the DolphinScheduler
+  # component security groups — never from the public internet.
+  ingress {
+    from_port = 2181
+    to_port   = 2181
+    protocol  = "tcp"
+    security_groups = [
+      aws_security_group.master.id,
+      aws_security_group.worker.id,
+      aws_security_group.alert.id,
+      aws_security_group.api.id,
+      aws_security_group.standalone.id
+    ]
+    # Fixed: this rule is for the ZooKeeper client port, not HTTP.
+    description = "Allow incoming ZooKeeper client connections"
+  }
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+    description = "Allow incoming SSH connections (Linux)"
+  }
+  # Allow all outbound traffic (package installs, Docker image pulls).
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-zookeeper-sg-${count.index}"
+  })
+}
+
+# Renders the cloud-init user data for the ZooKeeper instance.
+# NOTE(review): the "template_file" data source requires the deprecated
+# hashicorp/template provider; consider migrating to the built-in
+# templatefile() function.
+data "template_file" "zookeeper_user_data" {
+  # Anchor the path to the module directory so the configuration also works
+  # when Terraform is invoked from a different working directory.
+  template = file("${path.module}/templates/zookeeper/cloud-init.yaml")
+  vars = {
+    "ssh_public_key" = aws_key_pair.key_pair.public_key
+  }
+}
+
+# Single-node ZooKeeper instance for demonstration. Only created when no
+# external ZooKeeper is supplied via var.zookeeper_connect_string
+# (count = 0 otherwise).
+resource "aws_instance" "zookeeper" {
+  count = var.zookeeper_connect_string != "" ? 0 : 1
+
+  ami                         = data.aws_ami.amazon-linux.id
+  # NOTE(review): sizing, volume and networking settings reuse the
+  # standalone_server entries of the shared vm_* variables — there are no
+  # zookeeper-specific entries. Confirm this is intentional.
+  instance_type               = var.vm_instance_type.standalone_server
+  subnet_id                   = aws_subnet.public[0].id
+  vpc_security_group_ids      = [aws_security_group.zookeeper_sg[count.index].id]
+  source_dest_check           = false
+  associate_public_ip_address = var.vm_associate_public_ip_address.standalone_server
+
+  # Cloud-init config that installs Docker (templates/zookeeper/cloud-init.yaml).
+  user_data = data.template_file.zookeeper_user_data.rendered
+
+  root_block_device {
+    volume_size           = var.vm_root_volume_size.standalone_server
+    volume_type           = var.vm_root_volume_type.standalone_server
+    delete_on_termination = true
+    encrypted             = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-rbd-zookeeper-${count.index}"
+    })
+  }
+
+  # NOTE(review): /dev/xvda is normally the ROOT device name on Amazon Linux
+  # AMIs, so declaring it as an extra ebs_block_device likely conflicts with
+  # root_block_device above — confirm and use e.g. /dev/xvdb for data.
+  ebs_block_device {
+    device_name           = "/dev/xvda"
+    volume_size           = var.vm_data_volume_size.standalone_server
+    volume_type           = var.vm_data_volume_type.standalone_server
+    encrypted             = true
+    delete_on_termination = true
+    tags = merge(var.tags, {
+      "Name" = "${var.name_prefix}-ebd-zookeeper-${count.index}"
+    })
+  }
+
+  # SSH connection used by the remote-exec provisioner below; the root
+  # public key is installed by the cloud-init template.
+  connection {
+    type        = "ssh"
+    user        = "root"
+    private_key = tls_private_key.key_pair.private_key_pem
+    host        = self.public_ip
+    timeout     = "30s"
+  }
+
+  # Wait for cloud-init (Docker install) to finish, then start ZooKeeper
+  # as a Docker container listening on 2181.
+  provisioner "remote-exec" {
+    inline = [
+      "cloud-init status --wait",
+      "docker run -it --name zookeeper -d -p 2181:2181 zookeeper:3.5"
+    ]
+  }
+
+  tags = merge(var.tags, {
+    "Name" = "${var.name_prefix}-zookeeper-${count.index}"
+  })
+}
diff --git a/deploy/terraform/aws/zookeeper-output.tf b/deploy/terraform/aws/zookeeper-output.tf
new file mode 100644
index 0000000000..dd75ae0c0b
--- /dev/null
+++ b/deploy/terraform/aws/zookeeper-output.tf
@@ -0,0 +1,16 @@
+# Outputs describing the demo ZooKeeper instance(s); each list is empty
+# when an external ZooKeeper connect string is configured.
+output "zookeeper_server_instance_id" {
+  description = "Instance IDs of zookeeper instances"
+  value       = aws_instance.zookeeper[*].id
+}
+
+output "zookeeper_server_instance_private_ip" {
+  description = "Private IPs of zookeeper instances"
+  value       = aws_instance.zookeeper[*].private_ip
+}
+
+output "zookeeper_server_instance_public_dns" {
+  description = "Public domain names of zookeeper instances"
+  value       = aws_instance.zookeeper[*].public_dns
+}
+
+output "zookeeper_server_instance_public_ip" {
+  description = "Public IPs of zookeeper instances"
+  value       = aws_instance.zookeeper[*].public_ip
+}
diff --git a/deploy/terraform/aws/zookeeper-variables.tf b/deploy/terraform/aws/zookeeper-variables.tf
new file mode 100644
index 0000000000..1e0cc905f3
--- /dev/null
+++ b/deploy/terraform/aws/zookeeper-variables.tf
@@ -0,0 +1,5 @@
+# When non-empty, no ZooKeeper resources are created (see the count
+# conditions in zookeeper-main.tf) and this string is used instead.
+variable "zookeeper_connect_string" {
+  type        = string
+  description = "Zookeeper connect string, if empty, will create a single-node zookeeper for demonstration, don't use this in production"
+  default     = ""
+}