You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@pulsar.apache.org by ur...@apache.org on 2022/02/17 08:31:22 UTC

[pulsar-site] branch main updated (83aace2 -> ebae886)

This is an automated email from the ASF dual-hosted git repository.

urfree pushed a change to branch main
in repository https://gitbox.apache.org/repos/asf/pulsar-site.git.


    from 83aace2  update 2.5.x
     new 2a696df  update 2.4.x
     new a241a66  update 2.3.x, 2.2.x
     new 7b09a26  update migrate scripts
     new ebae886  update

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 site2/website-next/migrate/migrate-chapter.js      |  15 +-
 site2/website-next/migrate/migrate-docs.js         |   5 +
 site2/website-next/migrate/tool/find-md.js         |   2 +-
 .../version-2.2.0/admin-api-brokers.md             |   5 +-
 .../version-2.2.0/admin-api-clusters.md            |   7 +-
 .../version-2.2.0/admin-api-namespaces.md          | 124 ++--
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.2.0/admin-api-overview.md            |   1 -
 .../version-2.2.0/admin-api-partitioned-topics.md  |   1 -
 .../version-2.2.0/admin-api-permissions.md         |   3 +-
 .../version-2.2.0/admin-api-persistent-topics.md   |   1 -
 .../version-2.2.0/admin-api-schemas.md             |   1 -
 .../version-2.2.0/admin-api-tenants.md             |  19 +-
 .../version-2.2.0/administration-dashboard.md      |  12 +-
 .../version-2.2.0/administration-geo.md            |  54 +-
 .../version-2.2.0/administration-proxy.md          |   3 +-
 .../version-2.2.0/administration-stats.md          |   1 -
 .../version-2.2.0/administration-zk-bk.md          |   1 -
 .../version-2.2.0/client-libraries-java.md         | 211 +++---
 ...ting-started-clients.md => client-libraries.md} |   0
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.2.0/concepts-authentication.md       |   1 -
 .../version-2.2.0/concepts-clients.md              |   1 -
 .../version-2.2.0/concepts-messaging.md            | 305 ++++++--
 .../version-2.2.0/concepts-multi-tenancy.md        |   1 -
 .../version-2.2.0/concepts-overview.md             |   1 -
 .../version-2.2.0/concepts-replication.md          |   1 -
 .../version-2.2.0/concepts-schema-registry.md      | 107 ---
 .../version-2.2.0/concepts-tiered-storage.md       |   1 -
 .../version-2.2.0/concepts-topic-compaction.md     |   1 -
 .../version-2.2.0/cookbooks-compaction.md          |   1 -
 .../version-2.2.0/cookbooks-deduplication.md       |   3 +-
 .../version-2.2.0/cookbooks-encryption.md          |   3 +-
 .../version-2.2.0/cookbooks-message-queue.md       |   1 -
 .../version-2.2.0/cookbooks-non-persistent.md      |   1 -
 .../version-2.2.0/cookbooks-partitioned.md         |   1 -
 .../version-2.2.0/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.2.0/deploy-kubernetes.md             |   1 -
 .../version-2.2.0/deploy-monitoring.md             |  15 +-
 ...nary-protocol.md => develop-binary-protocol.md} |   0
 .../{developing-cpp.md => develop-cpp.md}          |   0
 ...ing-load-manager.md => develop-load-manager.md} |   0
 .../develop-schema.md                              |   0
 .../versioned_docs/version-2.2.0/develop-tools.md  |   1 -
 .../version-2.2.0/developing-schema.md             |  62 --
 .../version-2.2.0/functions-metrics.md             |   1 -
 .../version-2.2.0/functions-state.md               | 192 -----
 .../versioned_docs/version-2.2.0/io-connectors.md  |  18 +-
 .../versioned_docs/version-2.2.0/io-develop.md     |   1 -
 .../versioned_docs/version-2.2.0/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.2.0/reference-cli-tools.md           | 160 +++--
 .../version-2.2.0/reference-terminology.md         |   1 -
 .../version-2.2.0/security-athenz.md               |   1 -
 .../version-2.2.0/security-authorization.md        |   1 -
 .../version-2.2.0/security-encryption.md           |   1 -
 .../version-2.2.0/security-overview.md             |   1 -
 .../version-2.2.0/security-tls-authentication.md   |   1 -
 .../version-2.2.0/standalone-docker.md             | 150 ++--
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.2.1/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.2.1/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.2.1/adaptors-storm.md |   1 -
 .../version-2.2.1/admin-api-brokers.md             |   5 +-
 .../version-2.2.1/admin-api-clusters.md            |   7 +-
 .../version-2.2.1/admin-api-namespaces.md          | 124 ++--
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.2.1/admin-api-overview.md            |   1 -
 .../version-2.2.1/admin-api-partitioned-topics.md  |   1 -
 .../version-2.2.1/admin-api-permissions.md         |   3 +-
 .../version-2.2.1/admin-api-schemas.md             |   1 -
 .../version-2.2.1/admin-api-tenants.md             |  19 +-
 .../version-2.2.1/administration-dashboard.md      |  12 +-
 .../version-2.2.1/administration-geo.md            |  54 +-
 .../version-2.2.1/administration-proxy.md          |   3 +-
 .../version-2.2.1/administration-stats.md          |   1 -
 .../version-2.2.1/administration-zk-bk.md          |   1 -
 .../version-2.2.1/client-libraries-cpp.md          | 435 +++++++++--
 .../version-2.2.1/client-libraries-python.md       | 318 +++++++--
 .../version-2.2.1/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.2.1/concepts-authentication.md       |   1 -
 .../version-2.2.1/concepts-messaging.md            | 305 ++++++--
 .../version-2.2.1/concepts-multi-tenancy.md        |   1 -
 .../version-2.2.1/concepts-replication.md          |   1 -
 .../version-2.2.1/concepts-schema-registry.md      | 107 ---
 .../version-2.2.1/concepts-tiered-storage.md       |   1 -
 .../version-2.2.1/concepts-topic-compaction.md     |   1 -
 .../version-2.2.1/cookbooks-compaction.md          |   1 -
 .../version-2.2.1/cookbooks-deduplication.md       |   3 +-
 .../version-2.2.1/cookbooks-encryption.md          |   3 +-
 .../version-2.2.1/cookbooks-message-queue.md       |   1 -
 .../version-2.2.1/cookbooks-non-persistent.md      |   1 -
 .../version-2.2.1/cookbooks-partitioned.md         |   1 -
 .../version-2.2.1/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.2.1/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.2.1/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../versioned_docs/version-2.2.1/deploy-dcos.md    |  56 +-
 .../version-2.2.1/deploy-kubernetes.md             |   1 -
 .../version-2.2.1/deploy-monitoring.md             |  15 +-
 .../version-2.2.1/develop-binary-protocol.md       | 105 ++-
 .../version-2.2.1/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.2.1/develop-tools.md  |   1 -
 .../versioned_docs/version-2.2.1/functions-api.md  | 794 ---------------------
 .../version-2.2.1/functions-deploying.md           | 256 -------
 .../version-2.2.1/functions-guarantees.md          |  42 --
 .../version-2.2.1/functions-metrics.md             |   1 -
 .../version-2.2.1/functions-quickstart.md          | 459 +-----------
 .../version-2.2.1/functions-state.md               | 192 -----
 .../versioned_docs/version-2.2.1/io-develop.md     |   1 -
 .../versioned_docs/version-2.2.1/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.2.1/reference-cli-tools.md           | 160 +++--
 .../version-2.2.1/reference-terminology.md         |   1 -
 .../version-2.2.1/security-athenz.md               |   1 -
 .../version-2.2.1/security-authorization.md        |   1 -
 .../version-2.2.1/security-encryption.md           |   1 -
 .../version-2.2.1/security-extending.md            |   1 -
 .../version-2.2.1/security-overview.md             |   1 -
 .../version-2.2.1/sql-deployment-configurations.md |  94 ++-
 .../version-2.2.1/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.2.1/sql-overview.md   |   1 -
 ...ting-started-docker.md => standalone-docker.md} |   0
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.3.0/adaptors-spark.md |   1 -
 .../version-2.3.0/admin-api-brokers.md             |   5 +-
 .../version-2.3.0/admin-api-clusters.md            |   7 +-
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.3.0/admin-api-overview.md            |   1 -
 .../version-2.3.0/admin-api-permissions.md         |   3 +-
 .../version-2.3.0/admin-api-persistent-topics.md   |   1 -
 .../version-2.3.0/admin-api-schemas.md             |   1 -
 .../version-2.3.0/admin-api-tenants.md             |  19 +-
 .../version-2.3.0/administration-dashboard.md      |  12 +-
 .../version-2.3.0/administration-geo.md            |  55 +-
 .../version-2.3.0/administration-stats.md          |   1 -
 .../version-2.3.0/client-libraries-java.md         | 211 +++---
 .../version-2.3.0/client-libraries-websocket.md    |  82 ++-
 .../version-2.3.0/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.3.0/concepts-authentication.md       |   1 -
 .../version-2.3.0/concepts-clients.md              |   1 -
 .../version-2.3.0/concepts-multi-tenancy.md        |   1 -
 .../version-2.3.0/concepts-replication.md          |   1 -
 .../version-2.3.0/concepts-tiered-storage.md       |   1 -
 .../version-2.3.0/concepts-topic-compaction.md     |   1 -
 .../version-2.3.0/cookbooks-compaction.md          |   1 -
 .../version-2.3.0/cookbooks-deduplication.md       |   3 +-
 .../version-2.3.0/cookbooks-encryption.md          |   3 +-
 .../version-2.3.0/cookbooks-message-queue.md       |   1 -
 .../version-2.3.0/cookbooks-non-persistent.md      |   1 -
 .../version-2.3.0/cookbooks-partitioned.md         |   1 -
 .../version-2.3.0/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.3.0/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../versioned_docs/version-2.3.0/deploy-dcos.md    |  56 +-
 .../version-2.3.0/deploy-kubernetes.md             |   1 -
 .../version-2.3.0/develop-binary-protocol.md       | 105 ++-
 .../version-2.3.0/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.3.0/develop-tools.md  |   1 -
 .../versioned_docs/version-2.3.0/functions-api.md  | 794 ---------------------
 .../version-2.3.0/functions-guarantees.md          |  42 --
 .../version-2.3.0/functions-metrics.md             |   1 -
 .../version-2.3.0/functions-overview.md            |   1 -
 .../version-2.3.0/functions-state.md               | 192 -----
 .../versioned_docs/version-2.3.0/io-develop.md     |   1 -
 .../versioned_docs/version-2.3.0/io-overview.md    |   1 -
 .../versioned_docs/version-2.3.0/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.3.0/security-athenz.md               |   1 -
 .../version-2.3.0/security-encryption.md           |   1 -
 .../version-2.3.0/security-extending.md            |   1 -
 .../version-2.3.0/security-overview.md             |   1 -
 .../version-2.3.0/security-tls-authentication.md   |   1 -
 .../version-2.3.0/security-token-admin.md          |   1 -
 .../version-2.3.0/sql-deployment-configurations.md |  94 ++-
 .../version-2.3.0/sql-getting-started.md           |   1 -
 .../version-2.3.0/standalone-docker.md             | 150 ++--
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.3.1/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.3.1/adaptors-storm.md |   1 -
 .../version-2.3.1/admin-api-brokers.md             |   5 +-
 .../version-2.3.1/admin-api-clusters.md            |   7 +-
 .../version-2.3.1/admin-api-namespaces.md          | 124 ++--
 .../version-2.3.1/admin-api-overview.md            |   1 -
 .../version-2.3.1/admin-api-partitioned-topics.md  |   1 -
 .../version-2.3.1/admin-api-permissions.md         |   3 +-
 .../version-2.3.1/admin-api-persistent-topics.md   |   1 -
 .../version-2.3.1/admin-api-schemas.md             |   1 -
 .../version-2.3.1/admin-api-tenants.md             |  19 +-
 .../version-2.3.1/administration-geo.md            |  55 +-
 .../version-2.3.1/administration-proxy.md          |   3 +-
 .../version-2.3.1/administration-stats.md          |   1 -
 .../version-2.3.1/administration-zk-bk.md          |   1 -
 .../version-2.3.1/client-libraries-websocket.md    |  82 ++-
 .../version-2.3.1/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.3.1/concepts-authentication.md       |   1 -
 .../version-2.3.1/concepts-clients.md              |   1 -
 .../version-2.3.1/concepts-multi-tenancy.md        |   1 -
 .../version-2.3.1/concepts-replication.md          |   1 -
 .../version-2.3.1/concepts-tiered-storage.md       |   1 -
 .../version-2.3.1/concepts-topic-compaction.md     |   1 -
 .../version-2.3.1/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.3.1/cookbooks-compaction.md          |   1 -
 .../version-2.3.1/cookbooks-deduplication.md       |   3 +-
 .../version-2.3.1/cookbooks-encryption.md          |   3 +-
 .../version-2.3.1/cookbooks-message-queue.md       |   1 -
 .../version-2.3.1/cookbooks-non-persistent.md      |   1 -
 .../version-2.3.1/cookbooks-partitioned.md         |   1 -
 .../version-2.3.1/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.3.1/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.3.1/deploy-aws.md     |   3 +-
 .../versioned_docs/version-2.3.1/deploy-dcos.md    |  56 +-
 .../version-2.3.1/deploy-kubernetes.md             |   1 -
 .../version-2.3.1/deploy-monitoring.md             |  15 +-
 .../version-2.3.1/develop-binary-protocol.md       | 105 ++-
 .../version-2.3.1/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.3.1/develop-tools.md  |   1 -
 .../versioned_docs/version-2.3.1/functions-api.md  | 794 ---------------------
 .../version-2.3.1/functions-deploying.md           | 256 -------
 .../version-2.3.1/functions-guarantees.md          |  42 --
 .../version-2.3.1/functions-metrics.md             |   1 -
 .../version-2.3.1/functions-quickstart.md          | 459 +-----------
 .../version-2.3.1/functions-state.md               | 192 -----
 .../versioned_docs/version-2.3.1/io-cdc.md         |   1 -
 .../versioned_docs/version-2.3.1/io-overview.md    |   1 -
 .../versioned_docs/version-2.3.1/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.3.1/security-athenz.md               |   1 -
 .../version-2.3.1/security-authorization.md        |   1 -
 .../version-2.3.1/security-encryption.md           |   1 -
 .../version-2.3.1/security-extending.md            |   1 -
 .../version-2.3.1/security-overview.md             |   1 -
 .../version-2.3.1/security-tls-authentication.md   |   1 -
 .../version-2.3.1/sql-deployment-configurations.md |  94 ++-
 .../versioned_docs/version-2.3.1/sql-overview.md   |   1 -
 ...ting-started-docker.md => standalone-docker.md} |   0
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.3.2/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.3.2/adaptors-storm.md |   1 -
 .../version-2.3.2/admin-api-brokers.md             |   5 +-
 .../version-2.3.2/admin-api-clusters.md            |   7 +-
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.3.2/admin-api-overview.md            |   1 -
 .../version-2.3.2/admin-api-partitioned-topics.md  |   1 -
 .../version-2.3.2/admin-api-permissions.md         |   3 +-
 .../version-2.3.2/admin-api-persistent-topics.md   |   1 -
 .../version-2.3.2/admin-api-schemas.md             |   2 +-
 .../version-2.3.2/admin-api-tenants.md             |  19 +-
 .../version-2.3.2/administration-dashboard.md      |  12 +-
 .../version-2.3.2/administration-load-balance.md   |  43 +-
 .../version-2.3.2/administration-proxy.md          |   3 +-
 .../version-2.3.2/administration-stats.md          |   1 -
 .../version-2.3.2/administration-zk-bk.md          |   1 -
 .../version-2.3.2/client-libraries-cpp.md          | 435 +++++++++--
 .../version-2.3.2/client-libraries-go.md           | 162 ++++-
 .../version-2.3.2/client-libraries-java.md         | 211 +++---
 .../version-2.3.2/client-libraries-python.md       | 324 +++++++--
 .../version-2.3.2/client-libraries-websocket.md    |  82 ++-
 ...ting-started-clients.md => client-libraries.md} |   0
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.3.2/concepts-authentication.md       |   1 -
 .../version-2.3.2/concepts-multi-tenancy.md        |   1 -
 .../version-2.3.2/concepts-overview.md             |   1 -
 .../version-2.3.2/concepts-replication.md          |   1 -
 .../version-2.3.2/concepts-tiered-storage.md       |   1 -
 .../version-2.3.2/concepts-topic-compaction.md     |   1 -
 .../version-2.3.2/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.3.2/cookbooks-compaction.md          |   1 -
 .../version-2.3.2/cookbooks-deduplication.md       |   3 +-
 .../version-2.3.2/cookbooks-encryption.md          |   3 +-
 .../version-2.3.2/cookbooks-message-queue.md       |   1 -
 .../version-2.3.2/cookbooks-non-persistent.md      |   1 -
 .../version-2.3.2/cookbooks-partitioned.md         |   1 -
 .../version-2.3.2/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.3.2/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.3.2/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../versioned_docs/version-2.3.2/deploy-dcos.md    |  56 +-
 .../version-2.3.2/deploy-kubernetes.md             |   1 -
 .../version-2.3.2/deploy-monitoring.md             |  15 +-
 .../version-2.3.2/develop-binary-protocol.md       | 105 ++-
 .../version-2.3.2/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.3.2/develop-tools.md  |   1 -
 .../versioned_docs/version-2.3.2/functions-api.md  | 794 ---------------------
 .../version-2.3.2/functions-deploying.md           | 256 -------
 .../version-2.3.2/functions-metrics.md             |   1 -
 .../version-2.3.2/functions-overview.md            |   1 -
 .../version-2.3.2/functions-quickstart.md          | 459 +-----------
 .../version-2.3.2/functions-state.md               | 192 -----
 .../versioned_docs/version-2.3.2/io-cdc.md         |   1 -
 .../versioned_docs/version-2.3.2/io-develop.md     |   1 -
 .../versioned_docs/version-2.3.2/io-overview.md    |   1 -
 .../versioned_docs/version-2.3.2/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.3.2/reference-terminology.md         |   1 -
 .../version-2.3.2/security-athenz.md               |   1 -
 .../version-2.3.2/security-authorization.md        |   1 -
 .../version-2.3.2/security-encryption.md           |   1 -
 .../version-2.3.2/security-extending.md            |   1 -
 .../version-2.3.2/security-tls-authentication.md   |   1 -
 .../version-2.3.2/security-token-admin.md          |   1 -
 .../version-2.3.2/sql-deployment-configurations.md |  94 ++-
 .../version-2.3.2/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.3.2/sql-overview.md   |   1 -
 ...ting-started-docker.md => standalone-docker.md} |   0
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.4.0/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.4.0/adaptors-storm.md |   1 -
 .../version-2.4.0/admin-api-brokers.md             |   5 +-
 .../version-2.4.0/admin-api-clusters.md            |   7 +-
 .../version-2.4.0/admin-api-namespaces.md          | 124 ++--
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.4.0/admin-api-partitioned-topics.md  |   1 -
 .../version-2.4.0/admin-api-permissions.md         |   3 +-
 .../version-2.4.0/admin-api-persistent-topics.md   |   1 -
 .../version-2.4.0/admin-api-schemas.md             |   1 -
 .../version-2.4.0/admin-api-tenants.md             |  19 +-
 .../version-2.4.0/administration-dashboard.md      |  12 +-
 .../version-2.4.0/administration-load-balance.md   |  43 +-
 .../version-2.4.0/administration-proxy.md          |   3 +-
 .../version-2.4.0/administration-stats.md          |   1 -
 .../version-2.4.0/client-libraries-cpp.md          | 435 +++++++++--
 .../version-2.4.0/client-libraries-websocket.md    |  82 ++-
 .../version-2.4.0/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.4.0/concepts-authentication.md       |   1 -
 .../version-2.4.0/concepts-clients.md              |   1 -
 .../version-2.4.0/concepts-multi-tenancy.md        |   1 -
 .../version-2.4.0/concepts-overview.md             |   1 -
 .../version-2.4.0/concepts-replication.md          |   1 -
 .../version-2.4.0/concepts-topic-compaction.md     |   1 -
 .../version-2.4.0/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.4.0/cookbooks-deduplication.md       |   3 +-
 .../version-2.4.0/cookbooks-encryption.md          |   3 +-
 .../version-2.4.0/cookbooks-message-queue.md       |   1 -
 .../version-2.4.0/cookbooks-non-persistent.md      |   1 -
 .../version-2.4.0/cookbooks-partitioned.md         |   1 -
 .../version-2.4.0/cookbooks-retention-expiry.md    | 128 +++-
 .../versioned_docs/version-2.4.0/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../versioned_docs/version-2.4.0/deploy-dcos.md    |  56 +-
 .../version-2.4.0/deploy-kubernetes.md             |   1 -
 .../version-2.4.0/deploy-monitoring.md             |  15 +-
 ...nary-protocol.md => develop-binary-protocol.md} |   0
 .../develop-cpp.md                                 |   0
 .../version-2.4.0/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.4.0/develop-tools.md  |   1 -
 .../versioned_docs/version-2.4.0/developing-cpp.md | 114 ---
 .../version-2.4.0/functions-deploying.md           | 256 -------
 .../version-2.4.0/functions-guarantees.md          |  42 --
 .../version-2.4.0/functions-metrics.md             |   1 -
 .../versioned_docs/version-2.4.0/io-cdc.md         |   1 -
 .../versioned_docs/version-2.4.0/io-connectors.md  |  18 +-
 .../versioned_docs/version-2.4.0/io-develop.md     |   1 -
 .../versioned_docs/version-2.4.0/io-overview.md    |   1 -
 .../versioned_docs/version-2.4.0/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.4.0/reference-cli-tools.md           |  12 +-
 .../version-2.4.0/reference-terminology.md         |   1 -
 .../version-2.4.0/security-athenz.md               |   1 -
 .../version-2.4.0/security-authorization.md        |   1 -
 .../version-2.4.0/security-encryption.md           |   1 -
 .../version-2.4.0/security-extending.md            |   1 -
 .../version-2.4.0/sql-deployment-configurations.md |  94 ++-
 .../versioned_docs/version-2.4.0/sql-overview.md   |   1 -
 .../version-2.4.0/standalone-docker.md             | 150 ++--
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.4.1/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.4.1/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.4.1/adaptors-storm.md |   1 -
 .../version-2.4.1/admin-api-brokers.md             |   5 +-
 .../version-2.4.1/admin-api-clusters.md            |   7 +-
 .../version-2.4.1/admin-api-namespaces.md          | 124 ++--
 .../version-2.4.1/admin-api-permissions.md         |   3 +-
 .../version-2.4.1/admin-api-persistent-topics.md   |   1 -
 .../version-2.4.1/admin-api-schemas.md             |   1 -
 .../version-2.4.1/admin-api-tenants.md             |  19 +-
 .../version-2.4.1/administration-load-balance.md   |  43 +-
 .../version-2.4.1/administration-proxy.md          |   3 +-
 .../version-2.4.1/administration-stats.md          |   1 -
 .../version-2.4.1/administration-zk-bk.md          |   1 -
 .../version-2.4.1/client-libraries-cpp.md          | 435 +++++++++--
 .../version-2.4.1/client-libraries-go.md           | 162 ++++-
 .../version-2.4.1/client-libraries-websocket.md    |  82 ++-
 .../version-2.4.1/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.4.1/concepts-authentication.md       |   1 -
 .../version-2.4.1/concepts-clients.md              |   1 -
 .../version-2.4.1/concepts-multi-tenancy.md        |   1 -
 .../version-2.4.1/concepts-overview.md             |   1 -
 .../version-2.4.1/concepts-replication.md          |   1 -
 .../version-2.4.1/concepts-topic-compaction.md     |   1 -
 .../version-2.4.1/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.4.1/cookbooks-deduplication.md       |   3 +-
 .../version-2.4.1/cookbooks-encryption.md          |   3 +-
 .../version-2.4.1/cookbooks-message-queue.md       |   1 -
 .../version-2.4.1/cookbooks-non-persistent.md      |   1 -
 .../version-2.4.1/cookbooks-partitioned.md         |   1 -
 .../version-2.4.1/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.4.1/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.4.1/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../deploy-bare-metal.md}                          |   0
 .../versioned_docs/version-2.4.1/deploy-dcos.md    |  56 +-
 .../version-2.4.1/deploy-kubernetes.md             |   1 -
 .../version-2.4.1/deploy-monitoring.md             |  15 +-
 .../version-2.4.1/develop-binary-protocol.md       | 105 ++-
 .../version-2.4.1/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.4.1/develop-tools.md  |   1 -
 .../version-2.4.1/getting-started-standalone.md    | 266 -------
 .../versioned_docs/version-2.4.1/io-cdc.md         |   1 -
 .../versioned_docs/version-2.4.1/io-overview.md    |   1 -
 .../versioned_docs/version-2.4.1/io-quickstart.md  |   2 +-
 .../versioned_docs/version-2.4.1/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.4.1/reference-cli-tools.md           | 172 +++--
 .../version-2.4.1/reference-terminology.md         |   1 -
 .../version-2.4.1/security-encryption.md           |   1 -
 .../version-2.4.1/security-extending.md            |   1 -
 .../versioned_docs/version-2.4.1/security-jwt.md   |   1 -
 .../version-2.4.1/security-token-admin.md          |   1 -
 .../version-2.4.1/sql-deployment-configurations.md |  94 ++-
 .../version-2.4.1/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.4.1/sql-overview.md   |   1 -
 .../version-2.4.1/standalone-docker.md             | 150 ++--
 .../{version-2.5.0 => version-2.4.1}/standalone.md |   0
 .../versioned_docs/version-2.4.2/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.4.2/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.4.2/adaptors-storm.md |   1 -
 .../version-2.4.2/admin-api-brokers.md             |   5 +-
 .../version-2.4.2/admin-api-clusters.md            |   7 +-
 .../version-2.4.2/admin-api-namespaces.md          | 124 ++--
 .../version-2.4.2/admin-api-permissions.md         |   3 +-
 .../version-2.4.2/admin-api-persistent-topics.md   |   1 -
 .../version-2.4.2/admin-api-schemas.md             |   1 -
 .../version-2.4.2/admin-api-tenants.md             |  19 +-
 .../version-2.4.2/administration-geo.md            |   2 +-
 .../version-2.4.2/administration-load-balance.md   |  43 +-
 .../version-2.4.2/administration-proxy.md          |   3 +-
 .../version-2.4.2/administration-stats.md          |   1 -
 .../version-2.4.2/administration-zk-bk.md          |   1 -
 .../version-2.4.2/client-libraries-cpp.md          | 435 +++++++++--
 .../version-2.4.2/client-libraries-go.md           | 162 ++++-
 .../version-2.4.2/client-libraries-websocket.md    |  82 ++-
 .../version-2.4.2/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.4.2/concepts-authentication.md       |   1 -
 .../version-2.4.2/concepts-clients.md              |   1 -
 .../version-2.4.2/concepts-multi-tenancy.md        |   1 -
 .../version-2.4.2/concepts-overview.md             |   1 -
 .../version-2.4.2/concepts-replication.md          |   1 -
 .../version-2.4.2/concepts-topic-compaction.md     |   1 -
 .../version-2.4.2/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.4.2/cookbooks-deduplication.md       |   3 +-
 .../version-2.4.2/cookbooks-encryption.md          |   3 +-
 .../version-2.4.2/cookbooks-message-queue.md       |   1 -
 .../version-2.4.2/cookbooks-non-persistent.md      |   1 -
 .../version-2.4.2/cookbooks-partitioned.md         |   1 -
 .../version-2.4.2/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.4.2/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.4.2/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../deploy-bare-metal.md}                          |   0
 .../versioned_docs/version-2.4.2/deploy-dcos.md    |  56 +-
 .../version-2.4.2/deploy-kubernetes.md             |   1 -
 .../version-2.4.2/deploy-monitoring.md             |  15 +-
 .../version-2.4.2/develop-binary-protocol.md       | 105 ++-
 .../version-2.4.2/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.4.2/develop-tools.md  |   1 -
 .../versioned_docs/version-2.4.2/io-cdc.md         |   1 -
 .../versioned_docs/version-2.4.2/io-overview.md    |   1 -
 .../versioned_docs/version-2.4.2/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.4.2/reference-cli-tools.md           | 172 +++--
 .../version-2.4.2/reference-terminology.md         |   1 -
 .../version-2.4.2/security-encryption.md           |   1 -
 .../version-2.4.2/security-extending.md            |   1 -
 .../versioned_docs/version-2.4.2/security-jwt.md   |   1 -
 .../version-2.4.2/security-token-admin.md          |   1 -
 .../version-2.4.2/sql-deployment-configurations.md |  94 ++-
 .../version-2.4.2/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.4.2/sql-overview.md   |   1 -
 .../version-2.4.2/standalone-docker.md             | 150 ++--
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_sidebars/version-2.2.0-sidebars.json | 364 +---------
 .../versioned_sidebars/version-2.3.2-sidebars.json |   4 +
 site2/website-next/versions.json                   |   2 +-
 490 files changed, 8010 insertions(+), 11443 deletions(-)
 rename site2/website-next/versioned_docs/version-2.2.0/{getting-started-clients.md => client-libraries.md} (100%)
 rename site2/website-next/versioned_docs/version-2.2.0/{developing-binary-protocol.md => develop-binary-protocol.md} (100%)
 rename site2/website-next/versioned_docs/version-2.2.0/{developing-cpp.md => develop-cpp.md} (100%)
 rename site2/website-next/versioned_docs/version-2.2.0/{developing-load-manager.md => develop-load-manager.md} (100%)
 copy site2/website-next/versioned_docs/{version-2.9.1 => version-2.2.0}/develop-schema.md (100%)
 delete mode 100644 site2/website-next/versioned_docs/version-2.2.0/developing-schema.md
 rename site2/website-next/versioned_docs/version-2.2.0/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 rename site2/website-next/versioned_docs/version-2.2.0/{getting-started-standalone.md => standalone.md} (100%)
 rename site2/website-next/versioned_docs/version-2.2.1/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 rename site2/website-next/versioned_docs/version-2.2.1/{getting-started-docker.md => standalone-docker.md} (100%)
 rename site2/website-next/versioned_docs/version-2.2.1/{getting-started-standalone.md => standalone.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.0/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.0/{getting-started-standalone.md => standalone.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.1/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.1/{getting-started-docker.md => standalone-docker.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.1/{getting-started-standalone.md => standalone.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.2/{getting-started-clients.md => client-libraries.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.2/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.2/{getting-started-docker.md => standalone-docker.md} (100%)
 rename site2/website-next/versioned_docs/version-2.3.2/{getting-started-standalone.md => standalone.md} (100%)
 rename site2/website-next/versioned_docs/version-2.4.0/{developing-binary-protocol.md => develop-binary-protocol.md} (100%)
 copy site2/website-next/versioned_docs/{version-2.7.3 => version-2.4.0}/develop-cpp.md (100%)
 delete mode 100644 site2/website-next/versioned_docs/version-2.4.0/developing-cpp.md
 rename site2/website-next/versioned_docs/version-2.4.0/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 rename site2/website-next/versioned_docs/version-2.4.0/{getting-started-standalone.md => standalone.md} (100%)
 rename site2/website-next/versioned_docs/{version-2.4.2/develop-bare-metal.md => version-2.4.1/deploy-bare-metal.md} (100%)
 delete mode 100644 site2/website-next/versioned_docs/version-2.4.1/getting-started-standalone.md
 rename site2/website-next/versioned_docs/version-2.4.1/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 copy site2/website-next/versioned_docs/{version-2.5.0 => version-2.4.1}/standalone.md (100%)
 rename site2/website-next/versioned_docs/{version-2.4.1/develop-bare-metal.md => version-2.4.2/deploy-bare-metal.md} (100%)
 rename site2/website-next/versioned_docs/version-2.4.2/{reference-pulsar-admin.md => pulsar-admin.md} (100%)
 rename site2/website-next/versioned_docs/version-2.4.2/{getting-started-standalone.md => standalone.md} (100%)

[pulsar-site] 02/04: update 2.3.x, 2.2.x

Posted by ur...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

urfree pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/pulsar-site.git

commit a241a66cd6cf6b4ccab128bad30dfcb74d33b362
Author: LiLi <ur...@apache.org>
AuthorDate: Thu Feb 17 16:14:09 2022 +0800

    update 2.3.x, 2.2.x
    
    Signed-off-by: LiLi <ur...@apache.org>
---
 .../version-2.2.0/admin-api-brokers.md             |   5 +-
 .../version-2.2.0/admin-api-clusters.md            |   7 +-
 .../version-2.2.0/admin-api-namespaces.md          | 124 ++--
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.2.0/admin-api-overview.md            |   1 -
 .../version-2.2.0/admin-api-partitioned-topics.md  |   1 -
 .../version-2.2.0/admin-api-permissions.md         |   3 +-
 .../version-2.2.0/admin-api-persistent-topics.md   |   1 -
 .../version-2.2.0/admin-api-schemas.md             |   1 -
 .../version-2.2.0/admin-api-tenants.md             |  19 +-
 .../version-2.2.0/administration-dashboard.md      |  12 +-
 .../version-2.2.0/administration-geo.md            |  54 +-
 .../version-2.2.0/administration-proxy.md          |   3 +-
 .../version-2.2.0/administration-stats.md          |   1 -
 .../version-2.2.0/administration-zk-bk.md          |   1 -
 .../version-2.2.0/client-libraries-java.md         | 211 +++---
 ...ting-started-clients.md => client-libraries.md} |   0
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.2.0/concepts-authentication.md       |   1 -
 .../version-2.2.0/concepts-clients.md              |   1 -
 .../version-2.2.0/concepts-messaging.md            | 305 ++++++--
 .../version-2.2.0/concepts-multi-tenancy.md        |   1 -
 .../version-2.2.0/concepts-overview.md             |   1 -
 .../version-2.2.0/concepts-replication.md          |   1 -
 .../version-2.2.0/concepts-schema-registry.md      | 107 ---
 .../version-2.2.0/concepts-tiered-storage.md       |   1 -
 .../version-2.2.0/concepts-topic-compaction.md     |   1 -
 .../version-2.2.0/cookbooks-compaction.md          |   1 -
 .../version-2.2.0/cookbooks-deduplication.md       |   3 +-
 .../version-2.2.0/cookbooks-encryption.md          |   3 +-
 .../version-2.2.0/cookbooks-message-queue.md       |   1 -
 .../version-2.2.0/cookbooks-non-persistent.md      |   1 -
 .../version-2.2.0/cookbooks-partitioned.md         |   1 -
 .../version-2.2.0/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.2.0/deploy-kubernetes.md             |   1 -
 .../version-2.2.0/deploy-monitoring.md             |  15 +-
 ...nary-protocol.md => develop-binary-protocol.md} |   0
 .../{developing-cpp.md => develop-cpp.md}          |   0
 ...ing-load-manager.md => develop-load-manager.md} |   0
 .../{developing-schema.md => develop-schema.md}    |   0
 .../versioned_docs/version-2.2.0/develop-tools.md  |   1 -
 .../version-2.2.0/functions-metrics.md             |   1 -
 .../version-2.2.0/functions-state.md               | 192 -----
 .../versioned_docs/version-2.2.0/io-connectors.md  |  18 +-
 .../versioned_docs/version-2.2.0/io-develop.md     |   1 -
 .../versioned_docs/version-2.2.0/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.2.0/reference-cli-tools.md           | 160 +++--
 .../version-2.2.0/reference-terminology.md         |   1 -
 .../version-2.2.0/security-athenz.md               |   1 -
 .../version-2.2.0/security-authorization.md        |   1 -
 .../version-2.2.0/security-encryption.md           |   1 -
 .../version-2.2.0/security-overview.md             |   1 -
 .../version-2.2.0/security-tls-authentication.md   |   1 -
 .../version-2.2.0/standalone-docker.md             | 150 ++--
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.2.1/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.2.1/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.2.1/adaptors-storm.md |   1 -
 .../version-2.2.1/admin-api-brokers.md             |   5 +-
 .../version-2.2.1/admin-api-clusters.md            |   7 +-
 .../version-2.2.1/admin-api-namespaces.md          | 124 ++--
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.2.1/admin-api-overview.md            |   1 -
 .../version-2.2.1/admin-api-partitioned-topics.md  |   1 -
 .../version-2.2.1/admin-api-permissions.md         |   3 +-
 .../version-2.2.1/admin-api-schemas.md             |   1 -
 .../version-2.2.1/admin-api-tenants.md             |  19 +-
 .../version-2.2.1/administration-dashboard.md      |  12 +-
 .../version-2.2.1/administration-geo.md            |  54 +-
 .../version-2.2.1/administration-proxy.md          |   3 +-
 .../version-2.2.1/administration-stats.md          |   1 -
 .../version-2.2.1/administration-zk-bk.md          |   1 -
 .../version-2.2.1/client-libraries-cpp.md          | 435 +++++++++--
 .../version-2.2.1/client-libraries-python.md       | 318 +++++++--
 .../version-2.2.1/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.2.1/concepts-authentication.md       |   1 -
 .../version-2.2.1/concepts-messaging.md            | 305 ++++++--
 .../version-2.2.1/concepts-multi-tenancy.md        |   1 -
 .../version-2.2.1/concepts-replication.md          |   1 -
 .../version-2.2.1/concepts-schema-registry.md      | 107 ---
 .../version-2.2.1/concepts-tiered-storage.md       |   1 -
 .../version-2.2.1/concepts-topic-compaction.md     |   1 -
 .../version-2.2.1/cookbooks-compaction.md          |   1 -
 .../version-2.2.1/cookbooks-deduplication.md       |   3 +-
 .../version-2.2.1/cookbooks-encryption.md          |   3 +-
 .../version-2.2.1/cookbooks-message-queue.md       |   1 -
 .../version-2.2.1/cookbooks-non-persistent.md      |   1 -
 .../version-2.2.1/cookbooks-partitioned.md         |   1 -
 .../version-2.2.1/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.2.1/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.2.1/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../versioned_docs/version-2.2.1/deploy-dcos.md    |  56 +-
 .../version-2.2.1/deploy-kubernetes.md             |   1 -
 .../version-2.2.1/deploy-monitoring.md             |  15 +-
 .../version-2.2.1/develop-binary-protocol.md       | 105 ++-
 .../version-2.2.1/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.2.1/develop-tools.md  |   1 -
 .../versioned_docs/version-2.2.1/functions-api.md  | 794 ---------------------
 .../version-2.2.1/functions-deploying.md           | 256 -------
 .../version-2.2.1/functions-guarantees.md          |  42 --
 .../version-2.2.1/functions-metrics.md             |   1 -
 .../version-2.2.1/functions-quickstart.md          | 459 +-----------
 .../version-2.2.1/functions-state.md               | 192 -----
 .../versioned_docs/version-2.2.1/io-develop.md     |   1 -
 .../versioned_docs/version-2.2.1/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.2.1/reference-cli-tools.md           | 160 +++--
 .../version-2.2.1/reference-terminology.md         |   1 -
 .../version-2.2.1/security-athenz.md               |   1 -
 .../version-2.2.1/security-authorization.md        |   1 -
 .../version-2.2.1/security-encryption.md           |   1 -
 .../version-2.2.1/security-extending.md            |   1 -
 .../version-2.2.1/security-overview.md             |   1 -
 .../version-2.2.1/sql-deployment-configurations.md |  94 ++-
 .../version-2.2.1/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.2.1/sql-overview.md   |   1 -
 ...ting-started-docker.md => standalone-docker.md} |   0
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.3.0/adaptors-spark.md |   1 -
 .../version-2.3.0/admin-api-brokers.md             |   5 +-
 .../version-2.3.0/admin-api-clusters.md            |   7 +-
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.3.0/admin-api-overview.md            |   1 -
 .../version-2.3.0/admin-api-permissions.md         |   3 +-
 .../version-2.3.0/admin-api-persistent-topics.md   |   1 -
 .../version-2.3.0/admin-api-schemas.md             |   1 -
 .../version-2.3.0/admin-api-tenants.md             |  19 +-
 .../version-2.3.0/administration-dashboard.md      |  12 +-
 .../version-2.3.0/administration-geo.md            |  55 +-
 .../version-2.3.0/administration-stats.md          |   1 -
 .../version-2.3.0/client-libraries-java.md         | 211 +++---
 .../version-2.3.0/client-libraries-websocket.md    |  82 ++-
 .../version-2.3.0/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.3.0/concepts-authentication.md       |   1 -
 .../version-2.3.0/concepts-clients.md              |   1 -
 .../version-2.3.0/concepts-multi-tenancy.md        |   1 -
 .../version-2.3.0/concepts-replication.md          |   1 -
 .../version-2.3.0/concepts-tiered-storage.md       |   1 -
 .../version-2.3.0/concepts-topic-compaction.md     |   1 -
 .../version-2.3.0/cookbooks-compaction.md          |   1 -
 .../version-2.3.0/cookbooks-deduplication.md       |   3 +-
 .../version-2.3.0/cookbooks-encryption.md          |   3 +-
 .../version-2.3.0/cookbooks-message-queue.md       |   1 -
 .../version-2.3.0/cookbooks-non-persistent.md      |   1 -
 .../version-2.3.0/cookbooks-partitioned.md         |   1 -
 .../version-2.3.0/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.3.0/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../versioned_docs/version-2.3.0/deploy-dcos.md    |  56 +-
 .../version-2.3.0/deploy-kubernetes.md             |   1 -
 .../version-2.3.0/develop-binary-protocol.md       | 105 ++-
 .../version-2.3.0/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.3.0/develop-tools.md  |   1 -
 .../versioned_docs/version-2.3.0/functions-api.md  | 794 ---------------------
 .../version-2.3.0/functions-guarantees.md          |  42 --
 .../version-2.3.0/functions-metrics.md             |   1 -
 .../version-2.3.0/functions-overview.md            |   1 -
 .../version-2.3.0/functions-state.md               | 192 -----
 .../versioned_docs/version-2.3.0/io-develop.md     |   1 -
 .../versioned_docs/version-2.3.0/io-overview.md    |   1 -
 .../versioned_docs/version-2.3.0/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.3.0/security-athenz.md               |   1 -
 .../version-2.3.0/security-encryption.md           |   1 -
 .../version-2.3.0/security-extending.md            |   1 -
 .../version-2.3.0/security-overview.md             |   1 -
 .../version-2.3.0/security-tls-authentication.md   |   1 -
 .../version-2.3.0/security-token-admin.md          |   1 -
 .../version-2.3.0/sql-deployment-configurations.md |  94 ++-
 .../version-2.3.0/sql-getting-started.md           |   1 -
 .../version-2.3.0/standalone-docker.md             | 150 ++--
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.3.1/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.3.1/adaptors-storm.md |   1 -
 .../version-2.3.1/admin-api-brokers.md             |   5 +-
 .../version-2.3.1/admin-api-clusters.md            |   7 +-
 .../version-2.3.1/admin-api-namespaces.md          | 124 ++--
 .../version-2.3.1/admin-api-overview.md            |   1 -
 .../version-2.3.1/admin-api-partitioned-topics.md  |   1 -
 .../version-2.3.1/admin-api-permissions.md         |   3 +-
 .../version-2.3.1/admin-api-persistent-topics.md   |   1 -
 .../version-2.3.1/admin-api-schemas.md             |   1 -
 .../version-2.3.1/admin-api-tenants.md             |  19 +-
 .../version-2.3.1/administration-geo.md            |  55 +-
 .../version-2.3.1/administration-proxy.md          |   3 +-
 .../version-2.3.1/administration-stats.md          |   1 -
 .../version-2.3.1/administration-zk-bk.md          |   1 -
 .../version-2.3.1/client-libraries-websocket.md    |  82 ++-
 .../version-2.3.1/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.3.1/concepts-authentication.md       |   1 -
 .../version-2.3.1/concepts-clients.md              |   1 -
 .../version-2.3.1/concepts-multi-tenancy.md        |   1 -
 .../version-2.3.1/concepts-replication.md          |   1 -
 .../version-2.3.1/concepts-tiered-storage.md       |   1 -
 .../version-2.3.1/concepts-topic-compaction.md     |   1 -
 .../version-2.3.1/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.3.1/cookbooks-compaction.md          |   1 -
 .../version-2.3.1/cookbooks-deduplication.md       |   3 +-
 .../version-2.3.1/cookbooks-encryption.md          |   3 +-
 .../version-2.3.1/cookbooks-message-queue.md       |   1 -
 .../version-2.3.1/cookbooks-non-persistent.md      |   1 -
 .../version-2.3.1/cookbooks-partitioned.md         |   1 -
 .../version-2.3.1/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.3.1/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.3.1/deploy-aws.md     |   3 +-
 .../versioned_docs/version-2.3.1/deploy-dcos.md    |  56 +-
 .../version-2.3.1/deploy-kubernetes.md             |   1 -
 .../version-2.3.1/deploy-monitoring.md             |  15 +-
 .../version-2.3.1/develop-binary-protocol.md       | 105 ++-
 .../version-2.3.1/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.3.1/develop-tools.md  |   1 -
 .../versioned_docs/version-2.3.1/functions-api.md  | 794 ---------------------
 .../version-2.3.1/functions-deploying.md           | 256 -------
 .../version-2.3.1/functions-guarantees.md          |  42 --
 .../version-2.3.1/functions-metrics.md             |   1 -
 .../version-2.3.1/functions-quickstart.md          | 459 +-----------
 .../version-2.3.1/functions-state.md               | 192 -----
 .../versioned_docs/version-2.3.1/io-cdc.md         |   1 -
 .../versioned_docs/version-2.3.1/io-overview.md    |   1 -
 .../versioned_docs/version-2.3.1/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.3.1/security-athenz.md               |   1 -
 .../version-2.3.1/security-authorization.md        |   1 -
 .../version-2.3.1/security-encryption.md           |   1 -
 .../version-2.3.1/security-extending.md            |   1 -
 .../version-2.3.1/security-overview.md             |   1 -
 .../version-2.3.1/security-tls-authentication.md   |   1 -
 .../version-2.3.1/sql-deployment-configurations.md |  94 ++-
 .../versioned_docs/version-2.3.1/sql-overview.md   |   1 -
 ...ting-started-docker.md => standalone-docker.md} |   0
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.3.2/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.3.2/adaptors-storm.md |   1 -
 .../version-2.3.2/admin-api-brokers.md             |   5 +-
 .../version-2.3.2/admin-api-clusters.md            |   7 +-
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.3.2/admin-api-overview.md            |   1 -
 .../version-2.3.2/admin-api-partitioned-topics.md  |   1 -
 .../version-2.3.2/admin-api-permissions.md         |   3 +-
 .../version-2.3.2/admin-api-persistent-topics.md   |   1 -
 .../version-2.3.2/admin-api-schemas.md             |   2 +-
 .../version-2.3.2/admin-api-tenants.md             |  19 +-
 .../version-2.3.2/administration-dashboard.md      |  12 +-
 .../version-2.3.2/administration-load-balance.md   |  43 +-
 .../version-2.3.2/administration-proxy.md          |   3 +-
 .../version-2.3.2/administration-stats.md          |   1 -
 .../version-2.3.2/administration-zk-bk.md          |   1 -
 .../version-2.3.2/client-libraries-cpp.md          | 435 +++++++++--
 .../version-2.3.2/client-libraries-go.md           | 162 ++++-
 .../version-2.3.2/client-libraries-java.md         | 211 +++---
 .../version-2.3.2/client-libraries-python.md       | 324 +++++++--
 .../version-2.3.2/client-libraries-websocket.md    |  82 ++-
 ...ting-started-clients.md => client-libraries.md} |   0
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.3.2/concepts-authentication.md       |   1 -
 .../version-2.3.2/concepts-multi-tenancy.md        |   1 -
 .../version-2.3.2/concepts-overview.md             |   1 -
 .../version-2.3.2/concepts-replication.md          |   1 -
 .../version-2.3.2/concepts-tiered-storage.md       |   1 -
 .../version-2.3.2/concepts-topic-compaction.md     |   1 -
 .../version-2.3.2/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.3.2/cookbooks-compaction.md          |   1 -
 .../version-2.3.2/cookbooks-deduplication.md       |   3 +-
 .../version-2.3.2/cookbooks-encryption.md          |   3 +-
 .../version-2.3.2/cookbooks-message-queue.md       |   1 -
 .../version-2.3.2/cookbooks-non-persistent.md      |   1 -
 .../version-2.3.2/cookbooks-partitioned.md         |   1 -
 .../version-2.3.2/cookbooks-retention-expiry.md    | 128 +++-
 .../version-2.3.2/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.3.2/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 +--
 .../versioned_docs/version-2.3.2/deploy-dcos.md    |  56 +-
 .../version-2.3.2/deploy-kubernetes.md             |   1 -
 .../version-2.3.2/deploy-monitoring.md             |  15 +-
 .../version-2.3.2/develop-binary-protocol.md       | 105 ++-
 .../version-2.3.2/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.3.2/develop-tools.md  |   1 -
 .../versioned_docs/version-2.3.2/functions-api.md  | 794 ---------------------
 .../version-2.3.2/functions-deploying.md           | 256 -------
 .../version-2.3.2/functions-metrics.md             |   1 -
 .../version-2.3.2/functions-overview.md            |   1 -
 .../version-2.3.2/functions-quickstart.md          | 459 +-----------
 .../version-2.3.2/functions-state.md               | 192 -----
 .../versioned_docs/version-2.3.2/io-cdc.md         |   1 -
 .../versioned_docs/version-2.3.2/io-develop.md     |   1 -
 .../versioned_docs/version-2.3.2/io-overview.md    |   1 -
 .../versioned_docs/version-2.3.2/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.3.2/reference-terminology.md         |   1 -
 .../version-2.3.2/security-athenz.md               |   1 -
 .../version-2.3.2/security-authorization.md        |   1 -
 .../version-2.3.2/security-encryption.md           |   1 -
 .../version-2.3.2/security-extending.md            |   1 -
 .../version-2.3.2/security-tls-authentication.md   |   1 -
 .../version-2.3.2/security-token-admin.md          |   1 -
 .../version-2.3.2/sql-deployment-configurations.md |  94 ++-
 .../version-2.3.2/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.3.2/sql-overview.md   |   1 -
 ...ting-started-docker.md => standalone-docker.md} |   0
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_sidebars/version-2.2.0-sidebars.json | 364 +---------
 .../versioned_sidebars/version-2.3.2-sidebars.json |   4 +
 site2/website-next/versions.json                   |   2 +-
 308 files changed, 4643 insertions(+), 9172 deletions(-)

diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-brokers.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-brokers.md
index dbac453..10a90ca 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-brokers.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-brokers.md
@@ -2,7 +2,6 @@
 id: admin-api-brokers
 title: Managing Brokers
 sidebar_label: "Brokers"
-original_id: admin-api-brokers
 ---
 
 import Tabs from '@theme/Tabs';
@@ -26,9 +25,9 @@ Pulsar brokers consist of two components:
 
 [Brokers](reference-terminology.md#broker) can be managed via:
 
-* The [`brokers`](reference-pulsar-admin.md#brokers) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `brokers` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/brokers` endpoint of the admin {@inject: rest:REST:/} API
-* The `brokers` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin.html} object in the [Java API](client-libraries-java)
+* The `brokers` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 In addition to being configurable when you start them up, brokers can also be [dynamically configured](#dynamic-broker-configuration).
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-clusters.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-clusters.md
index 972c7e1..8687ae6 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-clusters.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-clusters.md
@@ -2,7 +2,6 @@
 id: admin-api-clusters
 title: Managing Clusters
 sidebar_label: "Clusters"
-original_id: admin-api-clusters
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -24,9 +23,9 @@ servers (aka [bookies](reference-terminology.md#bookie)), and a [ZooKeeper](http
 
 Clusters can be managed via:
 
-* The [`clusters`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `clusters` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/clusters` endpoint of the admin {@inject: rest:REST:/} API
-* The `clusters` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `clusters` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Clusters resources
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-namespaces.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-namespaces.md
index 216cb6f..c53fa3c 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-namespaces.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-namespaces.md
@@ -2,7 +2,6 @@
 id: admin-api-namespaces
 title: Managing Namespaces
 sidebar_label: "Namespaces"
-original_id: admin-api-namespaces
 ---
 
 import Tabs from '@theme/Tabs';
@@ -23,9 +22,9 @@ Pulsar [namespaces](reference-terminology.md#namespace) are logical groupings of
 
 Namespaces can be managed via:
 
-* The [`namespaces`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `namespaces` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/namespaces` endpoint of the admin {@inject: rest:REST:/} API
-* The `namespaces` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `namespaces` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Namespaces resources
 
@@ -49,8 +48,12 @@ $ pulsar-admin namespaces create test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|PUT|/admin/v2/namespaces/:tenant/:namespace|operation/createNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -105,8 +108,12 @@ $ pulsar-admin namespaces policies test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace|operation/getPolicies?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -142,8 +149,12 @@ test-tenant/ns2
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant|operation/getTenantNamespaces?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -177,8 +188,12 @@ $ pulsar-admin namespaces delete test-tenant/ns1
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace|operation/deleteNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -196,7 +211,7 @@ admin.namespaces().deleteNamespace(namespace);
 
 #### Set replication cluster
 
-It sets replication clusters for a namespace, so Pulsar can internally replicate publish message from one colo to another colo.
+You can set replication clusters for a namespace to enable Pulsar to internally replicate the published messages from one colocation facility to another.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -234,7 +249,7 @@ admin.namespaces().setNamespaceReplicationClusters(namespace, clusters);
 
 #### Get replication cluster
 
-It gives a list of replication clusters for a given namespace.
+You can get the list of replication clusters for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -281,13 +296,13 @@ admin.namespaces().getNamespaceReplicationClusters(namespace)
 
 Backlog quota helps the broker to restrict bandwidth/storage of a namespace once it reaches a certain threshold limit. Admin can set the limit and take corresponding action after the limit is reached.
 
-  1.  producer_request_hold: broker will hold and not persist produce request payload
+  1.  producer_request_hold: broker holds but does not persist produce request payload
 
-  2.  producer_exception: broker disconnects with the client by giving an exception.
+  2.  producer_exception: broker disconnects with the client by giving an exception
 
-  3.  consumer_backlog_eviction: broker will start discarding backlog messages
+  3.  consumer_backlog_eviction: broker starts discarding backlog messages
 
-  Backlog quota restriction can be taken care by defining restriction of backlog-quota-type: destination_storage
+Backlog quota restriction can be taken care of by defining restriction of backlog-quota-type: destination_storage.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -300,12 +315,6 @@ $ pulsar-admin namespaces set-backlog-quota --limit 10G --limitTime 36000 --poli
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -330,7 +339,7 @@ admin.namespaces().setBacklogQuota(namespace, new BacklogQuota(limit, limitTime,
 
 #### Get backlog quota policies
 
-It shows a configured backlog quota for a given namespace.
+You can get a configured backlog quota for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -378,7 +387,7 @@ admin.namespaces().getBacklogQuotaMap(namespace);
 
 #### Remove backlog quota policies
 
-It removes backlog quota policies for a given namespace
+You can remove backlog quota policies for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -391,12 +400,6 @@ $ pulsar-admin namespaces remove-backlog-quota test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -423,7 +426,7 @@ admin.namespaces().removeBacklogQuota(namespace, backlogQuotaType)
 
 #### Set persistence policies
 
-Persistence policies allow to configure persistency-level for all topic messages under a given namespace.
+Persistence policies allow users to configure persistency-level for all topic messages under a given namespace.
 
   -   Bookkeeper-ack-quorum: Number of acks (guaranteed copies) to wait for each entry, default: 0
 
@@ -444,12 +447,6 @@ $ pulsar-admin namespaces set-persistence --bookkeeper-ack-quorum 2 --bookkeeper
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -474,7 +471,7 @@ admin.namespaces().setPersistence(namespace,new PersistencePolicies(bookkeeperEn
 
 #### Get persistence policies
 
-It shows the configured persistence policies of a given namespace.
+You can get the configured persistence policies of a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -537,12 +534,6 @@ $ pulsar-admin namespaces unload --bundle 0x00000000_0xffffffff test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -567,8 +558,7 @@ admin.namespaces().unloadNamespaceBundle(namespace, bundle)
 
 #### Split namespace bundles
 
-Each namespace bundle can contain multiple topics and each bundle can be served by only one broker. 
-If a single bundle is creating an excessive load on a broker, an admin splits the bundle using this command permitting one or more of the new bundles to be unloaded thus spreading the load across the brokers.
+One namespace bundle can contain multiple topics but can be served by only one broker. If a single bundle is creating an excessive load on a broker, an admin can split the bundle using the command below, permitting one or more of the new bundles to be unloaded, thus balancing the load across the brokers.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -581,12 +571,6 @@ $ pulsar-admin namespaces split-bundle --bundle 0x00000000_0xffffffff test-tenan
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -613,7 +597,7 @@ admin.namespaces().splitNamespaceBundle(namespace, bundle)
 
 #### Set message-ttl
 
-It configures message’s time to live (in seconds) duration.
+You can configure the time to live (in seconds) duration for messages. In the example below, the message-ttl is set as 100s.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -626,12 +610,6 @@ $ pulsar-admin namespaces set-message-ttl --messageTTL 100 test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -656,7 +634,7 @@ admin.namespaces().setNamespaceMessageTTL(namespace, messageTTL)
 
 #### Get message-ttl
 
-It gives a message ttl of configured namespace.
+When the message-ttl for a namespace is set, you can use the command below to get the configured value. This example continues the example of the command `set message-ttl`, so the returned value is 100(s).
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -684,6 +662,12 @@ $ pulsar-admin namespaces get-message-ttl test-tenant/ns1
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -693,6 +677,12 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 
 </Tabs>
@@ -712,12 +702,6 @@ $ pulsar-admin namespaces remove-message-ttl test-tenant/ns1
 
 ```
 
-```
-
-100
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -758,12 +742,6 @@ $ pulsar-admin namespaces clear-backlog --sub my-subscription test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -801,12 +779,6 @@ $ pulsar-admin namespaces clear-backlog  --bundle 0x00000000_0xffffffff  --sub m
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -842,13 +814,7 @@ Each namespace contains multiple topics and the retention size (storage size) of
 
 ```
 
-$ pulsar-admin set-retention --size 100 --time 10 test-tenant/ns1
-
-```
-
-```
-
-N/A
+$ pulsar-admin namespaces set-retention --size 100 --time 10 test-tenant/ns1
 
 ```
 
@@ -932,9 +898,7 @@ disables the throttling.
 :::note
 
 - If neither `clusterDispatchRate` nor `topicDispatchRate` is configured, dispatch throttling is disabled.
->
 - If `topicDispatchRate` is not configured, `clusterDispatchRate` takes effect.
-> 
 - If `topicDispatchRate` is configured, `topicDispatchRate` takes effect.
 
 :::
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-non-persistent-topics.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-non-persistent-topics.md
index 12220de..78dac35 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-non-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-non-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-non-persistent-topics
 title: Managing non-persistent topics
 sidebar_label: "Non-Persistent topics"
-original_id: admin-api-non-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-overview.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-overview.md
index 7936a9c..bd1e1f5 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-overview.md
@@ -2,7 +2,6 @@
 id: admin-api-overview
 title: Pulsar admin interface
 sidebar_label: "Overview"
-original_id: admin-api-overview
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-partitioned-topics.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-partitioned-topics.md
index 6734586..7221b3d 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-partitioned-topics.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-partitioned-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-partitioned-topics
 title: Managing partitioned topics
 sidebar_label: "Partitioned topics"
-original_id: admin-api-partitioned-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-permissions.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-permissions.md
index e2ca469..faedbf1 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-permissions.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-permissions.md
@@ -2,7 +2,6 @@
 id: admin-api-permissions
 title: Managing permissions
 sidebar_label: "Permissions"
-original_id: admin-api-permissions
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-persistent-topics.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-persistent-topics.md
index b6d293b..8a7abae 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-persistent-topics
 title: Managing persistent topics
 sidebar_label: "Persistent topics"
-original_id: admin-api-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-schemas.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-schemas.md
index 9ffe21f..8399a03 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-schemas.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-schemas.md
@@ -2,6 +2,5 @@
 id: admin-api-schemas
 title: Managing Schemas
 sidebar_label: "Schemas"
-original_id: admin-api-schemas
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/admin-api-tenants.md b/site2/website-next/versioned_docs/version-2.2.0/admin-api-tenants.md
index fe68336..570ac31 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/admin-api-tenants.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/admin-api-tenants.md
@@ -2,7 +2,6 @@
 id: admin-api-tenants
 title: Managing Tenants
 sidebar_label: "Tenants"
-original_id: admin-api-tenants
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -80,22 +79,26 @@ $ pulsar-admin tenants create my-tenant
 
 ```
 
-When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
+When creating a tenant, you can optionally assign admin roles using the `-r`/`--admin-roles`
+flag, and clusters using the `-c`/`--allowed-clusters` flag. You can specify multiple values
+as a comma-separated list. Here are some examples:
 
 ```shell
 
 $ pulsar-admin tenants create my-tenant \
-  --admin-roles role1,role2,role3
+  --admin-roles role1,role2,role3 \
+  --allowed-clusters cluster1
 
 $ pulsar-admin tenants create my-tenant \
-  -r role1
+  -r role1 \
+  -c cluster1
 
 ```
 
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
+{@inject: endpoint|PUT|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -140,7 +143,7 @@ $ pulsar-admin tenants get my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|GET|/admin/v2/tenants/:cluster|operation/getTenant?version=@pulsar:version_number@}
+{@inject: endpoint|GET|/admin/v2/tenants/:tenant|operation/getTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -175,7 +178,7 @@ $ pulsar-admin tenants delete my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/deleteTenant?version=@pulsar:version_number@}
+{@inject: endpoint|DELETE|/admin/v2/tenants/:tenant|operation/deleteTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -210,7 +213,7 @@ $ pulsar-admin tenants update my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/updateTenant?version=@pulsar:version_number@}
+{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/updateTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
diff --git a/site2/website-next/versioned_docs/version-2.2.0/administration-dashboard.md b/site2/website-next/versioned_docs/version-2.2.0/administration-dashboard.md
index 514b076..1eb0404 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/administration-dashboard.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/administration-dashboard.md
@@ -7,7 +7,7 @@ original_id: administration-dashboard
 
 :::note
 
-Pulsar dashboard is deprecated. If you want to manage and monitor the stats of your topics, use [Pulsar Manager](administration-pulsar-manager). 
+Pulsar dashboard is deprecated. We recommend you use [Pulsar Manager](administration-pulsar-manager) to manage and monitor the stats of your topics. 
 
 :::
 
@@ -53,17 +53,17 @@ $ docker run -p 80:80 \
 ```
 
  
-You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the ip address or hostname of the machine running Pulsar standalone. The ip address or hostname should be accessible from the docker instance running dashboard.
+You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the IP address or hostname of the machine that runs Pulsar standalone. The IP address or hostname should be accessible from the Docker instance running the dashboard.
 
-Once the Docker container runs, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
+Once the Docker container starts, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
 
-> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container
+> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container.
 
 If the Pulsar service runs in standalone mode in `localhost`, the `SERVICE_URL` has to
-be the IP of the machine.
+be the IP address of the machine.
 
 Similarly, given the Pulsar standalone advertises itself with localhost by default, you need to
-explicitly set the advertise address to the host IP. For example:
+explicitly set the advertise address to the host IP address. For example:
 
 ```shell
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/administration-geo.md b/site2/website-next/versioned_docs/version-2.2.0/administration-geo.md
index 9c93a64..d956817 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/administration-geo.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/administration-geo.md
@@ -2,9 +2,12 @@
 id: administration-geo
 title: Pulsar geo-replication
 sidebar_label: "Geo-replication"
-original_id: administration-geo
 ---
 
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
 *Geo-replication* is the replication of persistently stored message data across multiple clusters of a Pulsar instance.
 
 ## How geo-replication works
@@ -44,8 +47,6 @@ All messages produced in any of the three clusters are delivered to all subscrip
 
 ## Configure replication
 
-As stated in [Geo-replication and Pulsar properties](#geo-replication-and-pulsar-properties) section, geo-replication in Pulsar is managed at the [tenant](reference-terminology.md#tenant) level.
-
 The following example connects three clusters: **us-east**, **us-west**, and **us-cent**.
 
 ### Connect replication clusters
@@ -107,7 +108,11 @@ $ bin/pulsar-admin tenants create my-tenant \
 
 To update permissions of an existing tenant, use `update` instead of `create`.
 
-### Enable geo-replication namespaces
+### Enable geo-replication 
+
+You can enable geo-replication at **namespace** or **topic** level.
+
+#### Enable geo-replication at namespace level
 
 You can create a namespace with the following command sample.
 
@@ -126,11 +131,24 @@ $ bin/pulsar-admin namespaces set-clusters my-tenant/my-namespace \
 
 ```
 
-You can change the replication clusters for a namespace at any time, without disruption to ongoing traffic. Replication channels are immediately set up or stopped in all clusters as soon as the configuration changes.
+#### Enable geo-replication at topic level
 
-### Use topics with geo-replication
+You can set geo-replication at topic level using the command `pulsar-admin topics set-replication-clusters`. For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+
+```shell
+
+$ bin/pulsar-admin topics set-replication-clusters --clusters us-west,us-east,us-cent my-tenant/my-namespace/my-topic
+
+```
 
-Once you create a geo-replication namespace, any topics that producers or consumers create within that namespace is replicated across clusters. Typically, each application uses the `serviceUrl` for the local cluster.
+:::tip
+
+- You can change the replication clusters for a namespace at any time, without disruption to ongoing traffic. Replication channels are immediately set up or stopped in all clusters as soon as the configuration changes.
+- Once you create a geo-replication namespace, any topics that producers or consumers create within that namespace are replicated across clusters. Typically, each application uses the `serviceUrl` for the local cluster.
+
+:::
+
+### Use topics with geo-replication
 
 #### Selective replication
 
@@ -158,14 +176,30 @@ producer.newMessage()
 
 #### Topic stats
 
-Topic-specific statistics for geo-replication topics are available via the [`pulsar-admin`](reference-pulsar-admin) tool and {@inject: rest:REST:/} API:
+You can check topic-specific statistics for geo-replication topics using one of the following methods.
+
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"}]}>
+<TabItem value="pulsar-admin">
+
+Use the [`pulsar-admin topics stats`](https://pulsar.apache.org/tools/pulsar-admin/) command.
 
 ```shell
 
-$ bin/pulsar-admin persistent stats persistent://my-tenant/my-namespace/my-topic
+$ bin/pulsar-admin topics stats persistent://my-tenant/my-namespace/my-topic
 
 ```
 
+</TabItem>
+<TabItem value="REST API">
+
+{@inject: endpoint|GET|/admin/v2/:schema/:tenant/:namespace/:topic/stats|operation/getStats?version=@pulsar:version_number@}
+
+</TabItem>
+
+</Tabs>
+
 Each cluster reports its own local stats, including the incoming and outgoing replication rates and backlogs.
 
 #### Delete a geo-replication topic
@@ -212,4 +246,4 @@ Consumer<String> consumer = client.newConsumer(Schema.STRING)
 ### Limitations
 
 * When you enable replicated subscription, you're creating a consistent distributed snapshot to establish an association between message ids from different clusters. The snapshots are taken periodically. The default value is `1 second`. It means that a consumer failing over to a different cluster can potentially receive 1 second of duplicates. You can also configure the frequency of the snapshot in the `broker.conf` file.
-* Only the base line cursor position is synced in replicated subscriptions while the individual acknowledgments are not synced. This means the messages acknowledged out-of-order could end up getting delivered again, in the case of a cluster failover.
+* Only the base line cursor position is synced in replicated subscriptions while the individual acknowledgments are not synced. This means the messages acknowledged out-of-order could end up getting delivered again, in the case of a cluster failover.
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.0/administration-proxy.md b/site2/website-next/versioned_docs/version-2.2.0/administration-proxy.md
index c046ed3..3cef937 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/administration-proxy.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/administration-proxy.md
@@ -2,10 +2,9 @@
 id: administration-proxy
 title: Pulsar proxy
 sidebar_label: "Pulsar proxy"
-original_id: administration-proxy
 ---
 
-Pulsar proxy is an optional gateway. Pulsar proxy is used when direction connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
+Pulsar proxy is an optional gateway. Pulsar proxy is used when direct connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
 
 ## Configure the proxy
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/administration-stats.md b/site2/website-next/versioned_docs/version-2.2.0/administration-stats.md
index ac0c036..2ccd73c 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/administration-stats.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/administration-stats.md
@@ -2,7 +2,6 @@
 id: administration-stats
 title: Pulsar stats
 sidebar_label: "Pulsar statistics"
-original_id: administration-stats
 ---
 
 ## Partitioned topics
diff --git a/site2/website-next/versioned_docs/version-2.2.0/administration-zk-bk.md b/site2/website-next/versioned_docs/version-2.2.0/administration-zk-bk.md
index de10d50..e5f9688 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/administration-zk-bk.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/administration-zk-bk.md
@@ -2,7 +2,6 @@
 id: administration-zk-bk
 title: ZooKeeper and BookKeeper administration
 sidebar_label: "ZooKeeper and BookKeeper"
-original_id: administration-zk-bk
 ---
 
 Pulsar relies on two external systems for essential tasks:
diff --git a/site2/website-next/versioned_docs/version-2.2.0/client-libraries-java.md b/site2/website-next/versioned_docs/version-2.2.0/client-libraries-java.md
index 28504f8..b8150e1 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/client-libraries-java.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/client-libraries-java.md
@@ -2,10 +2,9 @@
 id: client-libraries-java
 title: Pulsar Java client
 sidebar_label: "Java"
-original_id: client-libraries-java
 ---
 
-You can use Pulsar Java client to create Java [producer](#producer), [consumer](#consumer), and [readers](#reader-interface) of messages and to perform [administrative tasks](admin-api-overview). The current version of the Java client is **@pulsar:version@**.
+You can use a Pulsar Java client to create the Java [producer](#producer), [consumer](#consumer), and [readers](#reader) of messages and to perform [administrative tasks](admin-api-overview). The current Java client version is **@pulsar:version@**.
 
 All the methods in [producer](#producer), [consumer](#consumer), and [reader](#reader) of a Java client are thread-safe.
 
@@ -15,7 +14,7 @@ Package | Description | Maven Artifact
 :-------|:------------|:--------------
 [`org.apache.pulsar.client.api`](/api/client) | The producer and consumer API | [org.apache.pulsar:pulsar-client:@pulsar:version@](http://search.maven.org/#artifactdetails%7Corg.apache.pulsar%7Cpulsar-client%7C@pulsar:version@%7Cjar)
 [`org.apache.pulsar.client.admin`](/api/admin) | The Java [admin API](admin-api-overview) | [org.apache.pulsar:pulsar-client-admin:@pulsar:version@](http://search.maven.org/#artifactdetails%7Corg.apache.pulsar%7Cpulsar-client-admin%7C@pulsar:version@%7Cjar)
-`org.apache.pulsar.client.all` |Includes both `pulsar-client` and `pulsar-client-admin`<br /><br /> Both `pulsar-client` and `pulsar-client-admin` are shaded packages and they shade dependencies independently. Consequently, the applications using both `pulsar-client` and `pulsar-client-admin` have redundant shaded classes. It would be troublesome if you introduce new dependencies but forget to update shading rules. <br /><br /> In this case, you can use `pulsar-client-all`, which shades  [...]
+`org.apache.pulsar.client.all` |Include both `pulsar-client` and `pulsar-client-admin`<br /> Both `pulsar-client` and `pulsar-client-admin` are shaded packages and they shade dependencies independently. Consequently, the applications using both `pulsar-client` and `pulsar-client-admin` have redundant shaded classes. It would be troublesome if you introduce new dependencies but forget to update shading rules. <br /> In this case, you can use `pulsar-client-all`, which shades dependencies  [...]
 
 This document focuses only on the client API for producing and consuming messages on Pulsar topics. For how to use the Java admin client, see [Pulsar admin interface](admin-api-overview).
 
@@ -118,35 +117,56 @@ PulsarClient client = PulsarClient.builder()
 
 If you create a client, you can use the `loadConf` configuration. The following parameters are available in `loadConf`.
 
-| Type | Name | <div>Description</div> | Default
+| Name | Type |  <div>Description</div> | Default
 |---|---|---|---
-String | `serviceUrl` |Service URL provider for Pulsar service | None
-String | `authPluginClassName` | Name of the authentication plugin | None
-String | `authParams` | String represents parameters for the authentication plugin <br /><br />**Example**<br /> key1:val1,key2:val2|None
-long|`operationTimeoutMs`|Operation timeout |30000
-long|`statsIntervalSeconds`|Interval between each stats info<br /><br />Stats is activated with positive `statsInterval`<br /><br />Set `statsIntervalSeconds` to 1 second at least |60
-int|`numIoThreads`| The number of threads used for handling connections to brokers | 1 
-int|`numListenerThreads`|The number of threads used for handling message listeners. The listener thread pool is shared across all the consumers and readers using the "listener" model to get messages. For a given consumer, the listener is always invoked from the same thread to ensure ordering. If you want multiple threads to process a single topic, you need to create a [`shared`](https://pulsar.apache.org/docs/en/next/concepts-messaging/#shared) subscription and multiple consumers for thi [...]
-boolean|`useTcpNoDelay`|Whether to use TCP no-delay flag on the connection to disable Nagle algorithm |true
-boolean |`useTls` |Whether to use TLS encryption on the connection| false
-string | `tlsTrustCertsFilePath` |Path to the trusted TLS certificate file|None
-boolean|`tlsAllowInsecureConnection`|Whether the Pulsar client accepts untrusted TLS certificate from broker | false
-boolean | `tlsHostnameVerificationEnable` | Whether to enable TLS hostname verification|false
-int|`concurrentLookupRequest`|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000
-int|`maxLookupRequest`|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000
-int|`maxNumberOfRejectedRequestPerConnection`|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50
-int|`keepAliveIntervalSeconds`|Seconds of keeping alive interval for each client broker connection|30
-int|`connectionTimeoutMs`|Duration of waiting for a connection to a broker to be established <br /><br />If the duration passes without a response from a broker, the connection attempt is dropped|10000
-int|`requestTimeoutMs`|Maximum duration for completing a request |60000
-int|`defaultBackoffIntervalNanos`| Default duration for a backoff interval | TimeUnit.MILLISECONDS.toNanos(100);
-long|`maxBackoffIntervalNanos`|Maximum duration for a backoff interval|TimeUnit.SECONDS.toNanos(30)
-SocketAddress|`socks5ProxyAddress`|SOCKS5 proxy address | None
-String|`socks5ProxyUsername`|SOCKS5 proxy username | None
-String|`socks5ProxyPassword`|SOCKS5 proxy password | None
+`serviceUrl` | String | Service URL provider for Pulsar service | None
+`authPluginClassName` | String | Name of the authentication plugin | None
+ `authParams` | String | Parameters for the authentication plugin <br /><br />**Example**<br /> key1:val1,key2:val2|None
+`operationTimeoutMs`|long|Operation timeout |30000
+`statsIntervalSeconds`|long|Interval between each stats information<br /><br />Stats is activated with positive `statsInterval`<br /><br />Set `statsIntervalSeconds` to 1 second at least. |60
+`numIoThreads`| int| The number of threads used for handling connections to brokers | 1 
+`numListenerThreads`|int|The number of threads used for handling message listeners. The listener thread pool is shared across all the consumers and readers using the "listener" model to get messages. For a given consumer, the listener is always invoked from the same thread to ensure ordering. If you want multiple threads to process a single topic, you need to create a [`shared`](https://pulsar.apache.org/docs/en/next/concepts-messaging/#shared) subscription and multiple consumers for thi [...]
+`useTcpNoDelay`| boolean| Whether to use TCP no-delay flag on the connection to disable Nagle algorithm |true
+`useTls` |boolean |Whether to use TLS encryption on the connection| false
+ `tlsTrustCertsFilePath` |string |Path to the trusted TLS certificate file|None
+`tlsAllowInsecureConnection`|boolean|Whether the Pulsar client accepts untrusted TLS certificate from broker | false
+`tlsHostnameVerificationEnable` |boolean |  Whether to enable TLS hostname verification|false
+`concurrentLookupRequest`|int|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000
+`maxLookupRequest`|int|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000
+`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50
+`keepAliveIntervalSeconds`|int|Seconds of keeping alive interval for each client broker connection|30
+`connectionTimeoutMs`|int|Duration of waiting for a connection to a broker to be established <br /><br />If the duration passes without a response from a broker, the connection attempt is dropped|10000
+`requestTimeoutMs`|int|Maximum duration for completing a request |60000
+`defaultBackoffIntervalNanos`|int| Default duration for a backoff interval | TimeUnit.MILLISECONDS.toNanos(100);
+`maxBackoffIntervalNanos`|long|Maximum duration for a backoff interval|TimeUnit.SECONDS.toNanos(30)
+`socks5ProxyAddress`|SocketAddress|SOCKS5 proxy address | None
+`socks5ProxyUsername`|string|SOCKS5 proxy username | None
+`socks5ProxyPassword`|string|SOCKS5 proxy password | None
 
 Check out the Javadoc for the {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} class for a full list of configurable parameters.
 
-> In addition to client-level configuration, you can also apply [producer](#configuring-producers) and [consumer](#configuring-consumers) specific configuration as described in sections below.
+> In addition to client-level configuration, you can also apply [producer](#configure-producer) and [consumer](#configure-consumer) specific configuration as described in sections below.
+
+### Client memory allocator configuration
+You can set the client memory allocator configurations through Java properties.<br />
+
+| Property | Type |  <div>Description</div> | Default | Available values
+|---|---|---|---|---
+`pulsar.allocator.pooled` | String | If set to `true`, the client uses a direct memory pool. <br /> If set to `false`, the client uses heap memory without a pool | true | <li> true </li> <li> false </li> 
+`pulsar.allocator.exit_on_oom` | String | Whether to exit the JVM when OOM happens | false |  <li> true </li> <li> false </li>
+`pulsar.allocator.leak_detection` | String | The leak detection policy for the memory allocator | Disabled | <li> Disabled </li> <li> Simple </li> <li> Advanced </li> <li> Paranoid </li>
+`pulsar.allocator.out_of_memory_policy` | String | When an OOM occurs, the client throws an exception or fallbacks to heap | FallbackToHeap | <li> ThrowException </li> <li> FallbackToHeap </li>
+
+**Example**:
+
+```
+
+-Dpulsar.allocator.pooled=true
+-Dpulsar.allocator.exit_on_oom=false
+-Dpulsar.allocator.leak_detection=Disabled
+-Dpulsar.allocator.out_of_memory_policy=ThrowException
+
+```
 
 ## Producer
 
@@ -163,7 +183,7 @@ producer.send("My message".getBytes());
 
 ```
 
-By default, producers produce messages that consist of byte arrays. You can produce different types by specifying a message [schema](#schemas).
+By default, producers produce messages that consist of byte arrays. You can produce different types by specifying a message [schema](#schema).
 
 ```java
 
@@ -203,25 +223,25 @@ stringProducer.send("My message");
 
 ### Configure producer
 
-If you instantiate a `Producer` object by specifying only a topic name as the example above, use the default configuration for producer. 
+If you instantiate a `Producer` object by specifying only a topic name as the example above, the default configuration of producer is used.
 
 If you create a producer, you can use the `loadConf` configuration. The following parameters are available in `loadConf`.
 
-Type | Name| <div>Description</div>|  Default
+Name| Type |  <div>Description</div>|  Default
 |---|---|---|---
-String|	`topicName`|	Topic name| null|
-String|`producerName`|Producer name| null
-long|`sendTimeoutMs`|Message send timeout in ms.<br /><br />If a message is not acknowledged by a server before the `sendTimeout` expires, an error occurs.|30000
-boolean|`blockIfQueueFull`|If it is set to `true`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer block, rather than failing and throwing errors. <br /><br />If it is set to `false`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer fail and `ProducerQueueIsFullError` exceptions occur.<br /><br />The `MaxPendingMessages` parameter determines the size of the outgoing message queue.|false
-int|`maxPendingMessages`|The maximum size of a queue holding pending messages.<br /><br />For example, a message waiting to receive an acknowledgment from a [broker](reference-terminology.md#broker). <br /><br />By default, when the queue is full, all calls to the `Send` and `SendAsync` methods fail **unless** you set `BlockIfQueueFull` to `true`.|1000
-int|`maxPendingMessagesAcrossPartitions`|The maximum number of pending messages across partitions. <br /><br />Use the setting to lower the max pending messages for each partition ({@link #setMaxPendingMessages(int)}) if the total number exceeds the configured value.|50000
-MessageRoutingMode|`messageRoutingMode`|Message routing logic for producers on [partitioned topics](concepts-architecture-overview.md#partitioned-topics).<br /><br /> Apply the logic only when setting no key on messages. <br /><br />Available options are as follows: <br /><br /><li>`pulsar.RoundRobinDistribution`: round robin<br /><br /> </li><li>`pulsar.UseSinglePartition`: publish all messages to a single partition<br /><br /></li><li>`pulsar.CustomPartition`: a custom partitioning sch [...]
-HashingScheme|`hashingScheme`|Hashing function determining the partition where you publish a particular message (**partitioned topics only**).<br /><br />Available options are as follows:<br /><br /><li> `pulsar.JavaStringHash`: the equivalent of `String.hashCode()` in Java<br /><br /></li><li> `pulsar.Murmur3_32Hash`: applies the [Murmur3](https://en.wikipedia.org/wiki/MurmurHash) hashing function<br /><br /></li><li>`pulsar.BoostHash`: applies the hashing function from C++'s [Boost](ht [...]
-ProducerCryptoFailureAction|`cryptoFailureAction`|Producer should take action when encryption fails.<br /><br /><li>**FAIL**: if encryption fails, unencrypted messages fail to send.</li><br /><li> **SEND**: if encryption fails, unencrypted messages are sent. </li>|`ProducerCryptoFailureAction.FAIL`
-long|`batchingMaxPublishDelayMicros`|Batching time period of sending messages.|TimeUnit.MILLISECONDS.toMicros(1)
-int|batchingMaxMessages|The maximum number of messages permitted in a batch.|1000
-boolean|`batchingEnabled`|Enable batching of messages. |true
-CompressionType|`compressionType`|Message data compression type used by a producer. <br /><br />Available options:<li>[`LZ4`](https://github.com/lz4/lz4)<br /></li><li>[`ZLIB`](https://zlib.net/)<br /></li><li>[`ZSTD`](https://facebook.github.io/zstd/)<br /></li><li>[`SNAPPY`](https://google.github.io/snappy/)</li>| No compression
+`topicName`| string|		Topic name| null|
+`producerName`| string|Producer name| null
+`sendTimeoutMs`| long|Message send timeout in ms.<br />If a message is not acknowledged by a server before the `sendTimeout` expires, an error occurs.|30000
+`blockIfQueueFull`|boolean|If it is set to `true`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer block, rather than failing and throwing errors. <br />If it is set to `false`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer fail and `ProducerQueueIsFullError` exceptions occur.<br /><br />The `MaxPendingMessages` parameter determines the size of the outgoing message queue.|false
+`maxPendingMessages`| int|The maximum size of a queue holding pending messages.<br /><br />For example, a message waiting to receive an acknowledgment from a [broker](reference-terminology.md#broker). <br /><br />By default, when the queue is full, all calls to the `Send` and `SendAsync` methods fail **unless** you set `BlockIfQueueFull` to `true`.|1000
+`maxPendingMessagesAcrossPartitions`|int|The maximum number of pending messages across partitions. <br /><br />Use the setting to lower the max pending messages for each partition ({@link #setMaxPendingMessages(int)}) if the total number exceeds the configured value.|50000
+`messageRoutingMode`| MessageRoutingMode|Message routing logic for producers on [partitioned topics](concepts-architecture-overview.md#partitioned-topics).<br /> Apply the logic only when setting no key on messages. <br />Available options are as follows: <br /><li>`pulsar.RoundRobinDistribution`: round robin</li><li>`pulsar.UseSinglePartition`: publish all messages to a single partition</li><li>`pulsar.CustomPartition`: a custom partitioning scheme</li>|<li>`pulsar.RoundRobinDistribution`</li>
+`hashingScheme`| HashingScheme|Hashing function determining the partition where you publish a particular message (**partitioned topics only**).<br />Available options are as follows:<br /><li> `pulsar.JavastringHash`: the equivalent of `string.hashCode()` in Java</li><li> `pulsar.Murmur3_32Hash`: applies the [Murmur3](https://en.wikipedia.org/wiki/MurmurHash) hashing function</li><li>`pulsar.BoostHash`: applies the hashing function from C++'s [Boost](https://www.boost.org/doc/libs/1_62_0 [...]
+`cryptoFailureAction`| ProducerCryptoFailureAction|Producer should take action when encryption fails.<br /><li>**FAIL**: if encryption fails, unencrypted messages fail to send.</li><li> **SEND**: if encryption fails, unencrypted messages are sent.</li> |`ProducerCryptoFailureAction.FAIL`
+`batchingMaxPublishDelayMicros`| long|Batching time period of sending messages.|TimeUnit.MILLISECONDS.toMicros(1)
+`batchingMaxMessages` |int|The maximum number of messages permitted in a batch.|1000
+`batchingEnabled`| boolean|Enable batching of messages. |true
+`compressionType`|CompressionType|Message data compression type used by a producer. <br />Available options:<li>[`LZ4`](https://github.com/lz4/lz4)</li><li>[`ZLIB`](https://zlib.net/)<br /></li><li>[`ZSTD`](https://facebook.github.io/zstd/)</li><li>[`SNAPPY`](https://google.github.io/snappy/)</li>| No compression
 
 You can configure parameters if you do not want to use the default configuration.
 
@@ -240,7 +260,7 @@ Producer<byte[]> producer = client.newProducer()
 
 ### Message routing
 
-When using partitioned topics, you can specify the routing mode whenever you publish messages using a producer. For more information on specifying a routing mode using the Java client, see the [Partitioned Topics](cookbooks-partitioned) cookbook.
+When using partitioned topics, you can specify the routing mode whenever you publish messages using a producer. For more information on specifying a routing mode using the Java client, see the [Partitioned Topics cookbook](cookbooks-partitioned).
 
 ### Async send
 
@@ -279,7 +299,7 @@ You can terminate the builder chain with `sendAsync()` and get a future return.
 
 In Pulsar, consumers subscribe to topics and handle messages that producers publish to those topics. You can instantiate a new [consumer](reference-terminology.md#consumer) by first instantiating a {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} object and passing it a URL for a Pulsar broker (as [above](#client-configuration)).
 
-Once you've instantiated a {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} object, you can create a {@inject: javadoc:Consumer:/client/org/apache/pulsar/client/api/Consumer} by specifying a [topic](reference-terminology.md#topic) and a [subscription](concepts-messaging.md#subscription-modes).
+Once you've instantiated a {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} object, you can create a {@inject: javadoc:Consumer:/client/org/apache/pulsar/client/api/Consumer} by specifying a [topic](reference-terminology.md#topic) and a [subscription](concepts-messaging.md#subscription-types).
 
 ```java
 
@@ -339,29 +359,30 @@ If you instantiate a `Consumer` object by specifying only a topic and subscripti
 
 When you create a consumer, you can use the `loadConf` configuration. The following parameters are available in `loadConf`.
 
-Type | Name| <div>Description</div>|  Default
+ Name|Type | <div>Description</div>|  Default
 |---|---|---|---
-Set&lt;String&gt;|	`topicNames`|	Topic name|	Sets.newTreeSet()
-Pattern|   `topicsPattern`|	Topic pattern	|None
-String|	`subscriptionName`|	Subscription name|	None
-SubscriptionType| `subscriptionType`|	Subscription type <br /><br />Four subscription types are available:<li>Exclusive</li><li>Failover</li><li>Shared</li><li>Key_Shared</li>|SubscriptionType.Exclusive
-int | `receiverQueueSize` | Size of a consumer's receiver queue. <br /><br />For example, the number of messages accumulated by a consumer before an application calls `Receive`. <br /><br />A value higher than the default value increases consumer throughput, though at the expense of more memory utilization.| 1000
-long|`acknowledgementsGroupTimeMicros`|Group a consumer acknowledgment for a specified time.<br /><br />By default, a consumer uses 100ms grouping time to send out acknowledgments to a broker.<br /><br />Setting a group time of 0 sends out acknowledgments immediately. <br /><br />A longer ack group time is more efficient at the expense of a slight increase in message re-deliveries after a failure.|TimeUnit.MILLISECONDS.toMicros(100)
-long|`negativeAckRedeliveryDelayMicros`|Delay to wait before redelivering messages that failed to be processed.<br /><br /> When an application uses {@link Consumer#negativeAcknowledge(Message)}, failed messages are redelivered after a fixed timeout. |TimeUnit.MINUTES.toMicros(1)
-int |`maxTotalReceiverQueueSizeAcrossPartitions`|The max total receiver queue size across partitions.<br /><br />This setting reduces the receiver queue size for individual partitions if the total receiver queue size exceeds this value.|50000
-String|`consumerName`|Consumer name|null
-long|`ackTimeoutMillis`|Timeout of unacked messages|0
-long|`tickDurationMillis`|Granularity of the ack-timeout redelivery.<br /><br />Using an higher `tickDurationMillis` reduces the memory overhead to track messages when setting ack-timeout to a bigger value (for example, 1 hour).|1000
-int|`priorityLevel`|Priority level for a consumer to which a broker gives more priority while dispatching messages in Shared subscription type. <br /><br />The broker follows descending priorities. For example, 0=max-priority, 1, 2,...<br /><br />In shared subscription type, the broker **first dispatches messages to the max priority level consumers if they have permits**. Otherwise, the broker considers next priority level consumers.<br /><br /> **Example 1**<br /><br />If a subscription [...]
-ConsumerCryptoFailureAction|`cryptoFailureAction`|Consumer should take action when it receives a message that can not be decrypted.<br /><br /><li>**FAIL**: this is the default option to fail messages until crypto succeeds.</li><br /><li> **DISCARD**:silently acknowledge and not deliver message to an application.</li><br /><li>**CONSUME**: deliver encrypted messages to applications. It is the application's responsibility to decrypt the message.<br /><br />The decompression of message fai [...]
-SortedMap<String, String>|`properties`|A name or value property of this consumer.<br /><br />`properties` is application defined metadata attached to a consumer. <br /><br />When getting a topic stats, associate this metadata with the consumer stats for easier identification.|new TreeMap()
-boolean|`readCompacted`|If enabling `readCompacted`, a consumer reads messages from a compacted topic rather than reading a full message backlog of a topic.<br /><br /> A consumer only sees the latest value for each key in the compacted topic, up until reaching the point in the topic message when compacting backlog. Beyond that point, send messages as normal.<br /><br />Only enabling `readCompacted` on subscriptions to persistent topics, which have a single active consumer (like failure  [...]
-SubscriptionInitialPosition|`subscriptionInitialPosition`|Initial position at which to set cursor when subscribing to a topic at first time.|SubscriptionInitialPosition.Latest
-int|`patternAutoDiscoveryPeriod`|Topic auto discovery period when using a pattern for topic's consumer.<br /><br />The default and minimum value is 1 minute.|1
-RegexSubscriptionMode|`regexSubscriptionMode`|When subscribing to a topic using a regular expression, you can pick a certain type of topics.<br /><br /><li>**PersistentOnly**: only subscribe to persistent topics.</li><br /><li>**NonPersistentOnly**: only subscribe to non-persistent topics.</li><br /><li>**AllTopics**: subscribe to both persistent and non-persistent topics.</li>|RegexSubscriptionMode.PersistentOnly
-DeadLetterPolicy|`deadLetterPolicy`|Dead letter policy for consumers.<br /><br />By default, some messages are probably redelivered many times, even to the extent that it never stops.<br /><br />By using the dead letter mechanism, messages have the max redelivery count. **When exceeding the maximum number of redeliveries, messages are sent to the Dead Letter Topic and acknowledged automatically**.<br /><br />You can enable the dead letter mechanism by setting `deadLetterPolicy`.<br /><br [...]
-boolean|`autoUpdatePartitions`|If `autoUpdatePartitions` is enabled, a consumer subscribes to partition increasement automatically.<br /><br />**Note**: this is only for partitioned consumers.|true
-boolean|`replicateSubscriptionState`|If `replicateSubscriptionState` is enabled, a subscription state is replicated to geo-replicated clusters.|false
+`topicNames`| Set&lt;String&gt;|		Topic name|	Sets.newTreeSet()
+ `topicsPattern`|Pattern|  	Topic pattern	|None
+`subscriptionName`|String|		Subscription name|	None
+`subscriptionType`|SubscriptionType| 	Subscription type <br />Four subscription types are available:<li>Exclusive</li><li>Failover</li><li>Shared</li><li>Key_Shared</li>|SubscriptionType.Exclusive
+`receiverQueueSize` |int |  Size of a consumer's receiver queue. <br /><br />For example, the number of messages accumulated by a consumer before an application calls `Receive`. <br /><br />A value higher than the default value increases consumer throughput, though at the expense of more memory utilization.| 1000
+`acknowledgementsGroupTimeMicros`|long|Group a consumer acknowledgment for a specified time.<br /><br />By default, a consumer uses 100ms grouping time to send out acknowledgments to a broker.<br /><br />Setting a group time of 0 sends out acknowledgments immediately. <br /><br />A longer ack group time is more efficient at the expense of a slight increase in message re-deliveries after a failure.|TimeUnit.MILLISECONDS.toMicros(100)
+`negativeAckRedeliveryDelayMicros`|long|Delay to wait before redelivering messages that failed to be processed.<br /><br /> When an application uses {@link Consumer#negativeAcknowledge(Message)}, failed messages are redelivered after a fixed timeout. |TimeUnit.MINUTES.toMicros(1)
+`maxTotalReceiverQueueSizeAcrossPartitions`|int |The max total receiver queue size across partitions.<br /><br />This setting reduces the receiver queue size for individual partitions if the total receiver queue size exceeds this value.|50000
+`consumerName`|String|Consumer name|null
+`ackTimeoutMillis`|long|Timeout of unacked messages|0
+`tickDurationMillis`|long|Granularity of the ack-timeout redelivery.<br /><br />Using a higher `tickDurationMillis` reduces the memory overhead to track messages when setting ack-timeout to a bigger value (for example, 1 hour).|1000
+`priorityLevel`|int|Priority level for a consumer to which a broker gives more priority while dispatching messages in Shared subscription type. <br /><br />The broker follows descending priorities. For example, 0=max-priority, 1, 2,...<br /><br />In Shared subscription type, the broker **first dispatches messages to the max priority level consumers if they have permits**. Otherwise, the broker considers next priority level consumers.<br /><br /> **Example 1**<br />If a subscription has c [...]
+`cryptoFailureAction`|ConsumerCryptoFailureAction|Consumer should take action when it receives a message that can not be decrypted.<br /><li>**FAIL**: this is the default option to fail messages until crypto succeeds.</li><li> **DISCARD**:silently acknowledge and not deliver message to an application.</li><li>**CONSUME**: deliver encrypted messages to applications. It is the application's responsibility to decrypt the message.</li><br />The decompression of message fails. <br /><br />If  [...]
+`properties`|SortedMap<String, String>|A name or value property of this consumer.<br /><br />`properties` is application defined metadata attached to a consumer. <br /><br />When getting a topic stats, associate this metadata with the consumer stats for easier identification.|new TreeMap()
+`readCompacted`|boolean|If enabling `readCompacted`, a consumer reads messages from a compacted topic rather than reading a full message backlog of a topic.<br /><br /> A consumer only sees the latest value for each key in the compacted topic, up until reaching the point in the topic message when compacting backlog. Beyond that point, send messages as normal.<br /><br />Only enabling `readCompacted` on subscriptions to persistent topics, which have a single active consumer (like failure  [...]
+`subscriptionInitialPosition`|SubscriptionInitialPosition|Initial position at which to set cursor when subscribing to a topic at first time.|SubscriptionInitialPosition.Latest
+`patternAutoDiscoveryPeriod`|int|Topic auto discovery period when using a pattern for topic's consumer.<br /><br />The default and minimum value is 1 minute.|1
+`regexSubscriptionMode`|RegexSubscriptionMode|When subscribing to a topic using a regular expression, you can pick a certain type of topics.<br /><br /><li>**PersistentOnly**: only subscribe to persistent topics.</li><li>**NonPersistentOnly**: only subscribe to non-persistent topics.</li><li>**AllTopics**: subscribe to both persistent and non-persistent topics.</li>|RegexSubscriptionMode.PersistentOnly
+`deadLetterPolicy`|DeadLetterPolicy|Dead letter policy for consumers.<br /><br />By default, some messages are probably redelivered many times, even to the extent that it never stops.<br /><br />By using the dead letter mechanism, messages have the max redelivery count. **When exceeding the maximum number of redeliveries, messages are sent to the Dead Letter Topic and acknowledged automatically**.<br /><br />You can enable the dead letter mechanism by setting `deadLetterPolicy`.<br /><br [...]
+`autoUpdatePartitions`|boolean|If `autoUpdatePartitions` is enabled, a consumer automatically subscribes to newly added partitions.<br /><br />**Note**: this is only for partitioned consumers.|true
+`replicateSubscriptionState`|boolean|If `replicateSubscriptionState` is enabled, a subscription state is replicated to geo-replicated clusters.|false
+`negativeAckRedeliveryBackoff`|NegativeAckRedeliveryBackoff|Interface for custom message is negativeAcked policy. You can specify `NegativeAckRedeliveryBackoff` for a consumer.| `NegativeAckRedeliveryExponentialBackoff`
 
 You can configure parameters if you do not want to use the default configuration. For a full list, see the Javadoc for the {@inject: javadoc:ConsumerBuilder:/client/org/apache/pulsar/client/api/ConsumerBuilder} class. 
 
@@ -441,6 +462,30 @@ BatchReceivePolicy.builder()
 
 :::
 
+### Negative acknowledgment redelivery backoff
+
+The `NegativeAckRedeliveryBackoff` introduces a redelivery backoff mechanism. You can achieve redelivery with different delays by setting the `redeliveryCount` of messages. 
+
+```java
+
+Consumer consumer =  client.newConsumer()
+        .topic("my-topic")
+        .subscriptionName("my-subscription")
+        .negativeAckRedeliveryBackoff(NegativeAckRedeliveryExponentialBackoff.builder()
+                .minNackTimeMs(1000)
+                .maxNackTimeMs(60 * 1000)
+                .build())
+        .subscribe();
+
+```
+
+:::note
+
+- The `negativeAckRedeliveryBackoff` does not work with `consumer.negativeAcknowledge(MessageId messageId)` because you are not able to get the redelivery count from the message ID.
+- If a consumer crashes, it triggers the redelivery of unacked messages. In this case, `NegativeAckRedeliveryBackoff` does not take effect and the messages might get redelivered earlier than the delay time from the backoff.
+
+:::
+
 ### Multi-topic subscriptions
 
 In addition to subscribing a consumer to a single Pulsar topic, you can also subscribe to multiple topics simultaneously using [multi-topic subscriptions](concepts-messaging.md#multi-topic-subscriptions). To use multi-topic subscriptions you can supply either a regular expression (regex) or a `List` of topics. If you select topics via regex, all topics must be within the same Pulsar namespace.
@@ -542,7 +587,7 @@ Pulsar has various [subscription types](concepts-messaging#subscription-types) t
 
 A subscription is identical with the subscription name; a subscription name can specify only one subscription type at a time. To change the subscription type, you should first stop all consumers of this subscription.
 
-Different subscription types have different message distribution modes. This section describes the differences of subscription types and how to use them.
+Different subscription types have different message distribution types. This section describes the differences of subscription types and how to use them.
 
 In order to better describe their differences, assuming you have a topic named "my-topic", and the producer has published 10 messages.
 
@@ -662,7 +707,7 @@ Consumer consumer2 = client.newConsumer()
 
 ```
 
-In shared subscription type, multiple consumers can attach to the same subscription and messages are delivered in a round robin distribution across consumers.
+In Shared subscription type, multiple consumers can attach to the same subscription and messages are delivered in a round robin distribution across consumers.
 
 If a broker dispatches only one message at a time, consumer1 receives the following information.
 
@@ -711,7 +756,7 @@ Consumer consumer2 = client.newConsumer()
 
 ```
 
-`Key_Shared` subscription is like `Shared` subscription, all consumers can attach to the same subscription. But it is different from `Key_Shared` subscription, messages with the same key are delivered to only one consumer in order. The possible distribution of messages between different consumers (by default we do not know in advance which keys will be assigned to a consumer, but a key will only be assigned to a consumer at the same time).
+Just like in `Shared` subscription, all consumers in `Key_Shared` subscription type can attach to the same subscription. But `Key_Shared` subscription type is different from the `Shared` subscription. In `Key_Shared` subscription type, messages with the same key are delivered to only one consumer in order. The possible distribution of messages between different consumers (by default we do not know in advance which keys will be assigned to a consumer, but a key will only be assigned to a  [...]
 
 consumer1 receives the following information.
 
@@ -794,18 +839,18 @@ The code sample above shows pointing the `Reader` object to a specific message (
 ### Configure reader
 When you create a reader, you can use the `loadConf` configuration. The following parameters are available in `loadConf`.
 
-| Type | Name | <div>Description</div> | Default
+| Name | Type|  <div>Description</div> | Default
 |---|---|---|---
-String|`topicName`|Topic name. |None
-int|`receiverQueueSize`|Size of a consumer's receiver queue.<br /><br />For example, the number of messages that can be accumulated by a consumer before an application calls `Receive`.<br /><br />A value higher than the default value increases consumer throughput, though at the expense of more memory utilization.|1000
-ReaderListener&lt;T&gt;|`readerListener`|A listener that is called for message received.|None
-String|`readerName`|Reader name.|null
-String| `subscriptionName`|Subscription name|When there is a single topic, the default subscription name is `"reader-" + 10-digit UUID`.<br />When there are multiple topics, the default subscription name is `"multiTopicsReader-" + 10-digit UUID`.
-String|`subscriptionRolePrefix`|Prefix of subscription role. |null
-CryptoKeyReader|`cryptoKeyReader`|Interface that abstracts the access to a key store.|null
-ConsumerCryptoFailureAction|`cryptoFailureAction`|Consumer should take action when it receives a message that can not be decrypted.<br /><br /><li>**FAIL**: this is the default option to fail messages until crypto succeeds.</li><br /><li> **DISCARD**: silently acknowledge and not deliver message to an application.</li><br /><li>**CONSUME**: deliver encrypted messages to applications. It is the application's responsibility to decrypt the message.<br /><br />The message decompression fails [...]
-boolean|`readCompacted`|If enabling `readCompacted`, a consumer reads messages from a compacted topic rather than a full message backlog of a topic.<br /><br /> A consumer only sees the latest value for each key in the compacted topic, up until reaching the point in the topic message when compacting backlog. Beyond that point, send messages as normal.<br /><br />`readCompacted` can only be enabled on subscriptions to persistent topics, which have a single active consumer (for example, fa [...]
-boolean|`resetIncludeHead`|If set to true, the first message to be returned is the one specified by `messageId`.<br /><br />If set to false, the first message to be returned is the one next to the message specified by `messageId`.|false
+`topicName`|String|Topic name. |None
+`receiverQueueSize`|int|Size of a consumer's receiver queue.<br /><br />For example, the number of messages that can be accumulated by a consumer before an application calls `Receive`.<br /><br />A value higher than the default value increases consumer throughput, though at the expense of more memory utilization.|1000
+`readerListener`|ReaderListener&lt;T&gt;|A listener that is called for message received.|None
+`readerName`|String|Reader name.|null
+`subscriptionName`|String| Subscription name|When there is a single topic, the default subscription name is `"reader-" + 10-digit UUID`.<br />When there are multiple topics, the default subscription name is `"multiTopicsReader-" + 10-digit UUID`.
+`subscriptionRolePrefix`|String|Prefix of subscription role. |null
+`cryptoKeyReader`|CryptoKeyReader|Interface that abstracts the access to a key store.|null
+`cryptoFailureAction`|ConsumerCryptoFailureAction|Consumer should take action when it receives a message that can not be decrypted.<br /><li>**FAIL**: this is the default option to fail messages until crypto succeeds.</li><li> **DISCARD**: silently acknowledge and not deliver message to an application.</li><li>**CONSUME**: deliver encrypted messages to applications. It is the application's responsibility to decrypt the message.</li><br />The message decompression fails. <br /><br />If me [...]
+`readCompacted`|boolean|If enabling `readCompacted`, a consumer reads messages from a compacted topic rather than a full message backlog of a topic.<br /><br /> A consumer only sees the latest value for each key in the compacted topic, up until reaching the point in the topic message when compacting backlog. Beyond that point, send messages as normal.<br /><br />`readCompacted` can only be enabled on subscriptions to persistent topics, which have a single active consumer (for example, fa [...]
+`resetIncludeHead`|boolean|If set to true, the first message to be returned is the one specified by `messageId`.<br /><br />If set to false, the first message to be returned is the one next to the message specified by `messageId`.|false
 
 ### Sticky key range reader
 
@@ -827,7 +872,7 @@ Total hash range size is 65536, so the max end of the range should be less than
 
 ## Schema
 
-In Pulsar, all message data consists of byte arrays "under the hood." [Message schemas](schema-get-started) enable you to use other types of data when constructing and handling messages (from simple types like strings to more complex, application-specific types). If you construct, say, a [producer](#producers) without specifying a schema, then the producer can only produce messages of type `byte[]`. The following is an example.
+In Pulsar, all message data consists of byte arrays "under the hood." [Message schemas](schema-get-started) enable you to use other types of data when constructing and handling messages (from simple types like strings to more complex, application-specific types). If you construct, say, a [producer](#producer) without specifying a schema, then the producer can only produce messages of type `byte[]`. The following is an example.
 
 ```java
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/getting-started-clients.md b/site2/website-next/versioned_docs/version-2.2.0/client-libraries.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.0/getting-started-clients.md
rename to site2/website-next/versioned_docs/version-2.2.0/client-libraries.md
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-architecture-overview.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-architecture-overview.md
index 6a501d2..8fe0717 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-architecture-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-architecture-overview.md
@@ -2,7 +2,6 @@
 id: concepts-architecture-overview
 title: Architecture Overview
 sidebar_label: "Architecture"
-original_id: concepts-architecture-overview
 ---
 
 At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication) data amongst themselves.
@@ -146,7 +145,7 @@ Some important things to know about the Pulsar proxy:
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL.
 
 You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-authentication.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-authentication.md
index b375ecb..335da8d 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-authentication.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-authentication.md
@@ -2,7 +2,6 @@
 id: concepts-authentication
 title: Authentication and Authorization
 sidebar_label: "Authentication and Authorization"
-original_id: concepts-authentication
 ---
 
 Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-clients.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-clients.md
index b68f76a..65201b5 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-clients.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-clients.md
@@ -2,7 +2,6 @@
 id: concepts-clients
 title: Pulsar Clients
 sidebar_label: "Clients"
-original_id: concepts-clients
 ---
 
 Pulsar exposes a client API with language bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md), [C++](client-libraries-cpp.md) and [C#](client-libraries-dotnet). The client API optimizes and encapsulates Pulsar's client-broker communication protocol and exposes a simple and intuitive API for use by applications.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-messaging.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-messaging.md
index 70977e3..c9d3ea2 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-messaging.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-messaging.md
@@ -2,16 +2,17 @@
 id: concepts-messaging
 title: Messaging
 sidebar_label: "Messaging"
-original_id: concepts-messaging
 ---
 
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
 
-Pulsar is built on the [publish-subscribe](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) pattern (often abbreviated to pub-sub). In this pattern, [producers](#producers) publish messages to [topics](#topics). [Consumers](#consumers) [subscribe](#subscription-types) to those topics, process incoming messages, and send an acknowledgement when processing is complete.
+Pulsar is built on the [publish-subscribe](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) pattern (often abbreviated to pub-sub). In this pattern, [producers](#producers) publish messages to [topics](#topics); [consumers](#consumers) [subscribe](#subscription-types) to those topics, process incoming messages, and send [acknowledgements](#acknowledgement) to the broker when processing is finished.
 
-When a subscription is created, Pulsar [retains](concepts-architecture-overview.md#persistent-storage) all messages, even if the consumer is disconnected. Retained messages are discarded only when a consumer acknowledges that those messages are processed successfully.
+When a subscription is created, Pulsar [retains](concepts-architecture-overview.md#persistent-storage) all messages, even if the consumer is disconnected. The retained messages are discarded only when a consumer acknowledges that all these messages are processed successfully. 
+
+If the consumption of a message fails and you want this message to be consumed again, you can enable [message redelivery mechanism](#message-redelivery) to request the broker to resend this message.
 
 ## Messages
 
@@ -48,17 +49,17 @@ The default size of a message is 5 MB. You can configure the max size of a messa
   
   ```
 
-> For more information on Pulsar message contents, see Pulsar [binary protocol](developing-binary-protocol).
+> For more information on Pulsar messages, see Pulsar [binary protocol](developing-binary-protocol).
 
 ## Producers
 
-A producer is a process that attaches to a topic and publishes messages to a Pulsar [broker](reference-terminology.md#broker). The Pulsar broker process the messages.
+A producer is a process that attaches to a topic and publishes messages to a Pulsar [broker](reference-terminology.md#broker). The Pulsar broker processes the messages.
 
 ### Send modes
 
 Producers send messages to brokers synchronously (sync) or asynchronously (async).
 
-| Mode       | Description |            
+| Mode       | Description |
 |:-----------|-----------|
 | Sync send  | The producer waits for an acknowledgement from the broker after sending every message. If the acknowledgment is not received, the producer treats the sending operation as a failure.                                                                                                                                                                                    |
 | Async send | The producer puts a message in a blocking queue and returns immediately. The client library sends the message to the broker in the background. If the queue is full (you can [configure](reference-configuration.md#broker) the maximum size), the producer is blocked or fails immediately when calling the API, depending on arguments passed to the producer. |
@@ -75,12 +76,12 @@ You can have different types of access modes on topics for producers.
 
 :::note
 
-Once an application creates a producer with the `Exclusive` or `WaitForExclusive` access mode successfully, the instance of the application is guaranteed to be the **only one writer** on the topic. Other producers trying to produce on this topic get errors immediately or have to wait until they get the `Exclusive` access. 
+Once an application creates a producer with `Exclusive` or `WaitForExclusive` access mode successfully, the instance of this application is guaranteed to be the **only writer** to the topic. Any other producers trying to produce messages on this topic will either get errors immediately or have to wait until they get the `Exclusive` access. 
 For more information, see [PIP 68: Exclusive Producer](https://github.com/apache/pulsar/wiki/PIP-68:-Exclusive-Producer).
 
 :::
 
-You can set producer access mode through Java Client API. For more information, see `ProducerAccessMode` in [ProducerBuilder.java](https://github.com/apache/pulsar/blob/fc5768ca3bbf92815d142fe30e6bfad70a1b4fc6/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ProducerBuilder.java).
+You can set producer access mode through Java Client API. For more information, see `ProducerAccessMode` in [ProducerBuilder.java](https://github.com/apache/pulsar/blob/fc5768ca3bbf92815d142fe30e6bfad70a1b4fc6/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ProducerBuilder.java) file.
 
 
 ### Compression
@@ -98,17 +99,17 @@ When batching is enabled, the producer accumulates and sends a batch of messages
 
 In Pulsar, batches are tracked and stored as single units rather than as individual messages. Consumer unbundles a batch into individual messages. However, scheduled messages (configured through the `deliverAt` or the `deliverAfter` parameter) are always sent as individual messages even batching is enabled.
 
-In general, a batch is acknowledged when all of its messages are acknowledged by a consumer. It means unexpected failures, negative acknowledgements, or acknowledgement timeouts can result in redelivery of all messages in a batch, even if some of the messages are acknowledged.
+In general, a batch is acknowledged when all of its messages are acknowledged by a consumer. It means that when **not all** batch messages are acknowledged, then unexpected failures, negative acknowledgements, or acknowledgement timeouts can result in a redelivery of all messages in this batch.
 
-To avoid redelivering acknowledged messages in a batch to the consumer, Pulsar introduces batch index acknowledgement since Pulsar 2.6.0. When batch index acknowledgement is enabled, the consumer filters out the batch index that has been acknowledged and sends the batch index acknowledgement request to the broker. The broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer [...]
+To avoid redelivering acknowledged messages in a batch to the consumer, Pulsar introduces batch index acknowledgement since Pulsar 2.6.0. When batch index acknowledgement is enabled, the consumer filters out the batch index that has been acknowledged and sends the batch index acknowledgement request to the broker. The broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer [...]
 
 By default, batch index acknowledgement is disabled (`acknowledgmentAtBatchIndexLevelEnabled=false`). You can enable batch index acknowledgement by setting the `acknowledgmentAtBatchIndexLevelEnabled` parameter to `true` at the broker side. Enabling batch index acknowledgement results in more memory overheads. 
 
 ### Chunking
-When you enable chunking, read the following instructions.
+Before you enable chunking, read the following instructions.
 - Batching and chunking cannot be enabled simultaneously. To enable chunking, you must disable batching in advance.
 - Chunking is only supported for persisted topics.
-- Chunking is only supported for the exclusive and failover subscription types.
+- Chunking is only supported for Exclusive and Failover subscription types.
 
 When chunking is enabled (`chunkingEnabled=true`), if the message size is greater than the allowed maximum publish-payload size, the producer splits the original message into chunked messages and publishes them with chunked metadata to the broker separately and in order. At the broker side, the chunked messages are stored in the managed-ledger in the same way as that of ordinary messages. The only difference is that the consumer needs to buffer the chunked messages and combines them into [...]
 
@@ -149,71 +150,142 @@ Client libraries provide listener implementation for consumers. For example, the
 
 ### Acknowledgement
 
-When a consumer consumes a message successfully, the consumer sends an acknowledgement request to the broker. This message is permanently stored, and then deleted only after all the subscriptions have acknowledged it. If you want to store the message that has been acknowledged by a consumer, you need to configure the [message retention policy](concepts-messaging.md#message-retention-and-expiry).
+The consumer sends an acknowledgement request to the broker after it consumes a message successfully. Then, this consumed message will be permanently stored, and be deleted only after all the subscriptions have acknowledged it. If you want to store the messages that have been acknowledged by a consumer, you need to configure the [message retention policy](concepts-messaging.md#message-retention-and-expiry).
+
+For batch messages, you can enable batch index acknowledgement to avoid dispatching acknowledged messages to the consumer. For details about batch index acknowledgement, see [batching](#batching).
+
+Messages can be acknowledged in one of the following two ways:
+
+- Being acknowledged individually. With individual acknowledgement, the consumer acknowledges each message and sends an acknowledgement request to the broker.
+- Being acknowledged cumulatively. With cumulative acknowledgement, the consumer **only** acknowledges the last message it received. All messages in the stream up to (and including) the provided message are not redelivered to that consumer.
 
-For a batch message, if batch index acknowledgement is enabled, the broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer. When all indexes of the batch message are acknowledged, the batch message is deleted. For details about the batch index acknowledgement, see [batching](#batching).
+If you want to acknowledge messages individually, you can use the following API.
 
-Messages can be acknowledged in the following two ways:
+```java
+
+consumer.acknowledge(msg);
+
+```
 
-- Messages are acknowledged individually. With individual acknowledgement, the consumer needs to acknowledge each message and sends an acknowledgement request to the broker.
-- Messages are acknowledged cumulatively. With cumulative acknowledgement, the consumer only needs to acknowledge the last message it received. All messages in the stream up to (and including) the provided message are not re-delivered to that consumer.
+If you want to acknowledge messages cumulatively, you can use the following API.
+
+```java
+
+consumer.acknowledgeCumulative(msg);
+
+```
 
 :::note
 
-Cumulative acknowledgement cannot be used in [Shared subscription type](#subscription-types), because this subscription type involves multiple consumers which have access to the same subscription. In Shared subscription type, messages are acknowledged individually.
+Cumulative acknowledgement cannot be used in [Shared subscription type](#subscription-types), because Shared subscription type involves multiple consumers which have access to the same subscription. In Shared subscription type, messages are acknowledged individually.
 
 :::
 
 ### Negative acknowledgement
 
-When a consumer does not consume a message successfully at a time, and wants to consume the message again, the consumer sends a negative acknowledgement to the broker, and then the broker redelivers the message.
+The [negative acknowledgement](#negative-acknowledgement) mechanism allows you to send a notification to the broker indicating the consumer did not process a message.  When a consumer fails to consume a message and needs to re-consume it, the consumer sends a negative acknowledgement (nack) to the broker, triggering the broker to redeliver this message to the consumer.
+
+Messages are negatively acknowledged individually or cumulatively, depending on the consumption subscription type.
+
+In Exclusive and Failover subscription types, consumers only negatively acknowledge the last message they receive.
 
-Messages are negatively acknowledged either individually or cumulatively, depending on the consumption subscription type.
+In Shared and Key_Shared subscription types, consumers can negatively acknowledge messages individually.
 
-In the exclusive and failover subscription types, consumers only negatively acknowledge the last message they receive.
+Be aware that negative acknowledgments on ordered subscription types, such as Exclusive, Failover and Key_Shared, might cause failed messages being sent to consumers out of the original order.
 
-In the shared and Key_Shared subscription types, you can negatively acknowledge messages individually.
+Use the following API to negatively acknowledge message consumption.
 
-Be aware that negative acknowledgment on ordered subscription types, such as Exclusive, Failover and Key_Shared, can cause failed messages to arrive consumers out of the original order.
+```java
+
+Consumer<byte[]> consumer = pulsarClient.newConsumer()
+                .topic(topic)
+                .subscriptionName("sub-negative-ack")
+                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
+                .negativeAckRedeliveryDelay(2, TimeUnit.SECONDS) // the default value is 1 min
+                .subscribe();
+
+Message<byte[]> message = consumer.receive();
+
+// call the API to send negative acknowledgement
+consumer.negativeAcknowledge(message);
+
+message = consumer.receive();
+consumer.acknowledge(message);
+
+```
 
 :::note
 
-If batching is enabled, other messages and the negatively acknowledged messages in the same batch are redelivered to the consumer.
+If batching is enabled, all messages in one batch are redelivered to the consumer.
 
 :::
 
+### Negative redelivery backoff
+
+It happens sometimes that consumers fail to process messages successfully. In this case, you can use [negative acknowledgement](#negative-acknowledgement) to redeliver the messages after consumption failures. For the Shared subscription type, the messages are redelivered to other consumers; for other subscription types, the messages are redelivered to the same consumer.
+
+But this is not flexible enough. A better way is to use the **redelivery backoff mechanism**. You can redeliver messages with different delays by setting the number of times the messages are retried.
+
+Use the following API to enable `Negative Redelivery Backoff`.
+
+```java
+
+consumer.negativeAckRedeliveryBackoff(NegativeAckRedeliveryExponentialBackoff.builder()
+        .minNackTimeMs(1000)
+        .maxNackTimeMs(60 * 1000)
+        .build())
+
+```
+
 ### Acknowledgement timeout
 
-If a message is not consumed successfully, and you want to trigger the broker to redeliver the message automatically, you can adopt the unacknowledged message automatic re-delivery mechanism. Client tracks the unacknowledged messages within the entire `acktimeout` time range, and sends a `redeliver unacknowledged messages` request to the broker automatically when the acknowledgement timeout is specified.
+The acknowledgement timeout mechanism allows you to set a time range during which the client tracks the unacknowledged messages. After this acknowledgement timeout (`ackTimeout`) period, the client sends `redeliver unacknowledged messages` request to the broker, thus the broker resends the unacknowledged messages to the consumer.
+
+You can configure the acknowledgement timeout mechanism to redeliver the message if it is not acknowledged after `ackTimeout` or to execute a timer task to check the acknowledgement timeout messages during every `ackTimeoutTickTime` period.
 
 :::note
 
-If batching is enabled, other messages and the unacknowledged messages in the same batch are redelivered to the consumer.
+- If batching is enabled, all messages in one batch are redelivered to the consumer.  
+- Compared with acknowledgement timeout, negative acknowledgement is preferred. First, it is difficult to set a timeout value. Second, a broker resends messages when the message processing time exceeds the acknowledgement timeout, but these messages might not need to be re-consumed.
 
 :::
 
-:::note
+Use the following API to enable acknowledgement timeout.
 
-Prefer negative acknowledgements over acknowledgement timeout. Negative acknowledgement controls the re-delivery of individual messages with more precision, and avoids invalid redeliveries when the message processing time exceeds the acknowledgement timeout.
+```java
 
-:::
+Consumer<byte[]> consumer = pulsarClient.newConsumer()
+                .topic(topic)
+                .ackTimeout(2, TimeUnit.SECONDS) // the default value is 0
+                .ackTimeoutTickTime(1, TimeUnit.SECONDS)
+                .subscriptionName("sub")
+                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
+                .subscribe();
+
+Message<byte[]> message = consumer.receive();
+
+// wait at least 2 seconds
+message = consumer.receive();
+consumer.acknowledge(message);
+
+```
 
 ### Dead letter topic
 
-Dead letter topic enables you to consume new messages when some messages cannot be consumed successfully by a consumer. In this mechanism, messages that are failed to be consumed are stored in a separate topic, which is called dead letter topic. You can decide how to handle messages in the dead letter topic.
+Dead letter topic allows you to continue message consumption even some messages are not consumed successfully. The messages that are failed to be consumed are stored in a specific topic, which is called dead letter topic. You can decide how to handle the messages in the dead letter topic.
 
-The following example shows how to enable dead letter topic in a Java client using the default dead letter topic:
+Enable dead letter topic in a Java client using the default dead letter topic.
 
 ```java
 
 Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
-              .topic(topic)
-              .subscriptionName("my-subscription")
-              .subscriptionType(SubscriptionType.Shared)
-              .deadLetterPolicy(DeadLetterPolicy.builder()
-                    .maxRedeliverCount(maxRedeliveryCount)
-                    .build())
-              .subscribe();
+                .topic(topic)
+                .subscriptionName("my-subscription")
+                .subscriptionType(SubscriptionType.Shared)
+                .deadLetterPolicy(DeadLetterPolicy.builder()
+                      .maxRedeliverCount(maxRedeliveryCount)
+                      .build())
+                .subscribe();
 
 ```
 
@@ -225,38 +297,37 @@ The default dead letter topic uses this format:
 
 ```
 
-  
-If you want to specify the name of the dead letter topic, use this Java client example:
+Use the Java client to specify the name of the dead letter topic.
 
 ```java
 
 Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
-              .topic(topic)
-              .subscriptionName("my-subscription")
-              .subscriptionType(SubscriptionType.Shared)
-              .deadLetterPolicy(DeadLetterPolicy.builder()
-                    .maxRedeliverCount(maxRedeliveryCount)
-                    .deadLetterTopic("your-topic-name")
-                    .build())
-              .subscribe();
+                .topic(topic)
+                .subscriptionName("my-subscription")
+                .subscriptionType(SubscriptionType.Shared)
+                .deadLetterPolicy(DeadLetterPolicy.builder()
+                      .maxRedeliverCount(maxRedeliveryCount)
+                      .deadLetterTopic("your-topic-name")
+                      .build())
+                .subscribe();
 
 ```
 
-Dead letter topic depends on message re-delivery. Messages are redelivered either due to [acknowledgement timeout](#acknowledgement-timeout) or [negative acknowledgement](#negative-acknowledgement). If you are going to use negative acknowledgement on a message, make sure it is negatively acknowledged before the acknowledgement timeout. 
+Dead letter topic depends on message redelivery. Messages are redelivered either due to [acknowledgement timeout](#acknowledgement-timeout) or [negative acknowledgement](#negative-acknowledgement). If you are going to use negative acknowledgement on a message, make sure it is negatively acknowledged before the acknowledgement timeout. 
 
 :::note
 
-Currently, dead letter topic is enabled In the shared and Key_Shared subscription types.
+Currently, dead letter topic is enabled in Shared and Key_Shared subscription types.
 
 :::
 
 ### Retry letter topic
 
-For many online business systems, a message is re-consumed due to exception occurs in the business logic processing. To configure the delay time for re-consuming the failed messages, you can configure the producer to send messages to both the business topic and the retry letter topic, and enable automatic retry on the consumer. When automatic retry is enabled on the consumer, a message is stored in the retry letter topic if the messages are not consumed, and therefore the consumer automa [...]
+For many online business systems, a message is re-consumed when exception occurs in the business logic processing. To configure the delay time for re-consuming the failed messages, you can configure the producer to send messages to both the business topic and the retry letter topic, and enable automatic retry on the consumer. With this setting, the messages that are not consumed will be stored in the retry letter topic. After the specified delay time, the consumer automatically consumes  [...]
 
 By default, automatic retry is disabled. You can set `enableRetry` to `true` to enable automatic retry on the consumer.
 
-This example shows how to consume messages from a retry letter topic.
+Use the following API to consume messages from a retry letter topic.
 
 ```java
 
@@ -275,6 +346,44 @@ Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
 
 ```
 
+The messages in the retry letter topic contain some special properties that are automatically created by the client.
+
+Special property | Description
+:--------------------|:-----------
+`REAL_TOPIC` | The real topic name.
+`ORIGIN_MESSAGE_ID` | The origin message ID. It is crucial for message tracking.
+`RECONSUMETIMES`   | The retry consume times.
+`DELAY_TIME`      | Message delay timeMs.
+**Example**
+
+```
+
+REAL_TOPIC = persistent://public/default/my-topic
+ORIGIN_MESSAGE_ID = 1:0:-1:0
+RECONSUMETIMES = 6
+DELAY_TIME = 3000
+
+```
+
+Use the following API to store the messages in a retrial queue.
+
+```java
+
+consumer.reconsumeLater(msg, 3, TimeUnit.SECONDS);
+
+```
+
+Use the following API to add custom properties for the `reconsumeLater` function.
+
+```java
+
+Map<String, String> customProperties = new HashMap<String, String>();
+customProperties.put("custom-key-1", "custom-value-1");
+customProperties.put("custom-key-2", "custom-value-2");
+consumer.reconsumeLater(msg, customProperties, 3, TimeUnit.SECONDS);
+
+```
+
 ## Topics
 
 As in other pub-sub systems, topics in Pulsar are named channels for transmitting messages from producers to consumers. Topic names are URLs that have a well-defined structure:
@@ -292,7 +401,7 @@ Topic name component | Description
 `namespace`          | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespaces) level. Each tenant has one or multiple namespaces.
 `topic`              | The final part of the name. Topic names have no special meaning in a Pulsar instance.
 
-> **No need to explicitly create new topics**
+> **No need to explicitly create new topics**  
 > You do not need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar creates that topic under the namespace provided in the [topic name](#topics) automatically.
 > If no tenant or namespace is specified when a client creates a topic, the topic is created in the default tenant and namespace. You can also create a topic in a specified tenant and namespace, such as `persistent://my-tenant/my-namespace/my-topic`. `persistent://my-tenant/my-namespace/my-topic` means the `my-topic` topic is created in the `my-namespace` namespace of the `my-tenant` tenant.
 
@@ -306,18 +415,19 @@ A subscription is a named configuration rule that determines how messages are de
 
 ![Subscription types](/assets/pulsar-subscription-types.png)
 
-> **Pub-Sub or Queuing**
+> **Pub-Sub or Queuing**  
 > In Pulsar, you can use different subscriptions flexibly.
 > * If you want to achieve traditional "fan-out pub-sub messaging" among consumers, specify a unique subscription name for each consumer. It is exclusive subscription type.
 > * If you want to achieve "message queuing" among consumers, share the same subscription name among multiple consumers(shared, failover, key_shared).
 > * If you want to achieve both effects simultaneously, combine exclusive subscription type with other subscription types for consumers.
 
 ### Subscription types
+
 When a subscription has no consumers, its subscription type is undefined. The type of a subscription is defined when a consumer connects to it, and the type can be changed by restarting all consumers with a different configuration.
 
 #### Exclusive
 
-In *exclusive* type, only a single consumer is allowed to attach to the subscription. If multiple consumers subscribe to a topic using the same subscription, an error occurs.
+In *Exclusive* type, only a single consumer is allowed to attach to the subscription. If multiple consumers subscribe to a topic using the same subscription, an error occurs.
 
 In the diagram below, only **Consumer A-0** is allowed to consume messages.
 
@@ -339,11 +449,11 @@ In the diagram below, **Consumer-B-0** is the master consumer while **Consumer-B
 
 #### Shared
 
-In *shared* or *round robin* mode, multiple consumers can attach to the same subscription. Messages are delivered in a round robin distribution across consumers, and any given message is delivered to only one consumer. When a consumer disconnects, all the messages that were sent to it and not acknowledged will be rescheduled for sending to the remaining consumers.
+In *shared* or *round robin* type, multiple consumers can attach to the same subscription. Messages are delivered in a round robin distribution across consumers, and any given message is delivered to only one consumer. When a consumer disconnects, all the messages that were sent to it and not acknowledged will be rescheduled for sending to the remaining consumers.
 
 In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscribe to the topic, but **Consumer-C-3** and others could as well.
 
-> **Limitations of Shared type**
+> **Limitations of Shared type**  
 > When using Shared type, be aware that:
 > * Message ordering is not guaranteed.
 > * You cannot use cumulative acknowledgment with Shared type.
@@ -352,17 +462,61 @@ In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscrib
 
 #### Key_Shared
 
-In *Key_Shared* mode, multiple consumers can attach to the same subscription. Messages are delivered in a distribution across consumers and message with same key or same ordering key are delivered to only one consumer. No matter how many times the message is re-delivered, it is delivered to the same consumer. When a consumer connected or disconnected will cause served consumer change for some key of message.
+In *Key_Shared* type, multiple consumers can attach to the same subscription. Messages are delivered in a distribution across consumers and message with same key or same ordering key are delivered to only one consumer. No matter how many times the message is re-delivered, it is delivered to the same consumer. When a consumer connected or disconnected will cause served consumer change for some key of message.
+
+![Key_Shared subscriptions](/assets/pulsar-key-shared-subscriptions.png)
+
+Note that when the consumers are using the Key_Shared subscription type, you need to **disable batching** or **use key-based batching** for the producers. There are two reasons why the key-based batching is necessary for Key_Shared subscription type:
+1. The broker dispatches messages according to the keys of the messages, but the default batching approach might fail to pack the messages with the same key to the same batch. 
+2. Since it is the consumers instead of the broker who dispatch the messages from the batches, the key of the first message in one batch is considered as the key of all messages in this batch, thereby leading to context errors. 
+
+The key-based batching aims at resolving the above-mentioned issues. This batching method ensures that the producers pack the messages with the same key to the same batch. The messages without a key are packed into one batch and this batch has no key. When the broker dispatches messages from this batch, it uses `NON_KEY` as the key. In addition, each consumer is associated with **only one** key and should receive **only one message batch** for the connected key. By default, you can limit [...]
+
+Below are examples of enabling the key-based batching under the Key_Shared subscription type, with `client` being the Pulsar client that you created.
+
+<Tabs 
+  defaultValue="Java"
+  values={[{"label":"Java","value":"Java"},{"label":"C++","value":"C++"},{"label":"Python","value":"Python"}]}>
+<TabItem value="Java">
+
+```
+
+Producer<byte[]> producer = client.newProducer()
+        .topic("my-topic")
+        .batcherBuilder(BatcherBuilder.KEY_BASED)
+        .create();
+
+```
 
-> **Limitations of Key_Shared type**
+</TabItem>
+<TabItem value="C++">
+
+```
+
+ProducerConfiguration producerConfig;
+producerConfig.setBatchingType(ProducerConfiguration::BatchingType::KeyBasedBatching);
+Producer producer;
+client.createProducer("my-topic", producerConfig, producer);
+
+```
+
+</TabItem>
+<TabItem value="Python">
+
+```
+
+producer = client.create_producer(topic='my-topic', batching_type=pulsar.BatchingType.KeyBased)
+
+```
+
+</TabItem>
+
+</Tabs>
+
+> **Limitations of Key_Shared type**  
 > When you use Key_Shared type, be aware that:
 > * You need to specify a key or orderingKey for messages.
 > * You cannot use cumulative acknowledgment with Key_Shared type.
-> * Your producers should disable batching or use a key-based batch builder.
-
-![Key_Shared subscriptions](/assets/pulsar-key-shared-subscriptions.png)
-
-**You can disable Key_Shared subscription in the `broker.config` file.**
 
 ### Subscription modes
 
@@ -371,6 +525,7 @@ In *Key_Shared* mode, multiple consumers can attach to the same subscription. Me
 The subscription mode indicates the cursor type. 
 
 - When a subscription is created, an associated cursor is created to record the last consumed position. 
+
 - When a consumer of the subscription restarts, it can continue consuming from the last message it consumes.
 
 Subscription mode | Description | Note
@@ -434,7 +589,7 @@ When a consumer subscribes to a Pulsar topic, by default it subscribes to one sp
 
 When subscribing to multiple topics, the Pulsar client automatically makes a call to the Pulsar API to discover the topics that match the regex pattern/list, and then subscribe to all of them. If any of the topics do not exist, the consumer auto-subscribes to them once the topics are created.
 
-> **No ordering guarantees across multiple topics**
+> **No ordering guarantees across multiple topics**  
 > When a producer sends messages to a single topic, all messages are guaranteed to be read from that topic in the same order. However, these guarantees do not hold across multiple topics. So when a producer sends message to multiple topics, the order in which messages are read from those topics is not guaranteed to be the same.
 
 The following are multi-topic subscription examples for Java.
@@ -480,7 +635,7 @@ The **Topic1** topic has five partitions (**P0** through **P4**) split across th
 
 Messages for this topic are broadcast to two consumers. The [routing mode](#routing-modes) determines each message should be published to which partition, while the [subscription type](#subscription-types) determines which messages go to which consumers.
 
-Decisions about routing and subscription types can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
+Decisions about routing and subscription modes can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
 
 There is no difference between partitioned topics and normal topics in terms of how subscription types work, as partitioning only determines what happens between when a message is published by a producer and processed and acknowledged by a consumer.
 
@@ -577,6 +732,22 @@ Producer<byte[]> producer = client.newProducer()
 
 ```
 
+## Message redelivery
+
+Apache Pulsar supports graceful failure handling and ensures critical data is not lost. Software will always have unexpected conditions and at times messages may not be delivered successfully. Therefore, it is important to have a built-in mechanism that handles failure, particularly in asynchronous messaging as highlighted in the following examples.
+
+- Consumers get disconnected from the database or the HTTP server. When this happens, the database is temporarily offline while the consumer is writing the data to it and the external HTTP server that the consumer calls is momentarily unavailable.
+- Consumers get disconnected from a broker due to consumer crashes, broken connections, etc. As a consequence, the unacknowledged messages are delivered to other available consumers.
+
+Apache Pulsar avoids these and other message delivery failures using at-least-once delivery semantics that ensure Pulsar processes a message at least once. 
+
+To utilize message redelivery, you need to enable this mechanism in the Apache Pulsar client before the broker can resend the unacknowledged messages. You can activate the message redelivery mechanism in Apache Pulsar using three methods. 
+
+- [Negative Acknowledgment](#negative-acknowledgement)
+- [Acknowledgement Timeout](#acknowledgement-timeout)
+- [Retry letter topic](#retry-letter-topic)
+
+
 ## Message retention and expiry
 
 By default, Pulsar message brokers:
@@ -626,7 +797,7 @@ Message deduplication makes Pulsar an ideal messaging system to be used in conju
 > You can find more in-depth information in [this post](https://www.splunk.com/en_us/blog/it/exactly-once-is-not-exactly-the-same.html).
 
 ## Delayed message delivery
-Delayed message delivery enables you to consume a message later rather than immediately. In this mechanism, a message is stored in BookKeeper, `DelayedDeliveryTracker` maintains the time index(time -> messageId) in memory after published to a broker, and it is delivered to a consumer once the specific delayed time is passed.  
+Delayed message delivery enables you to consume a message later. In this mechanism, a message is stored in BookKeeper. The `DelayedDeliveryTracker` maintains the time index (time -> messageId) in memory after the message is published to a broker. This message will be delivered to a consumer once the specified delay is over.  
 
 Delayed message delivery only works in Shared subscription type. In Exclusive and Failover subscription types, the delayed message is dispatched immediately.
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-multi-tenancy.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-multi-tenancy.md
index be752cc..8a17e72 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-multi-tenancy.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-multi-tenancy.md
@@ -2,7 +2,6 @@
 id: concepts-multi-tenancy
 title: Multi Tenancy
 sidebar_label: "Multi Tenancy"
-original_id: concepts-multi-tenancy
 ---
 
 Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-overview.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-overview.md
index b903fa4..c76032c 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-overview.md
@@ -2,7 +2,6 @@
 id: concepts-overview
 title: Pulsar Overview
 sidebar_label: "Overview"
-original_id: concepts-overview
 ---
 
 Pulsar is a multi-tenant, high-performance solution for server-to-server messaging. Pulsar was originally developed by Yahoo, it is under the stewardship of the [Apache Software Foundation](https://www.apache.org/).
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-replication.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-replication.md
index 6e23962..11677cc 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-replication.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-replication.md
@@ -2,7 +2,6 @@
 id: concepts-replication
 title: Geo Replication
 sidebar_label: "Geo Replication"
-original_id: concepts-replication
 ---
 
 Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo) in Pulsar enables you to do that.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-schema-registry.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-schema-registry.md
index d8f106a..d28c9e6 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-schema-registry.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-schema-registry.md
@@ -2,111 +2,4 @@
 id: concepts-schema-registry
 title: Schema Registry
 sidebar_label: "Schema Registry"
-original_id: concepts-schema-registry
 ---
-
-Type safety is extremely important in any application built around a message bus like Pulsar. Producers and consumers need some kind of mechanism for coordinating types at the topic level lest a wide variety of potential problems arise (for example serialization and deserialization issues). Applications typically adopt one of two basic approaches to type safety in messaging:
-
-1. A "client-side" approach in which message producers and consumers are responsible for not only serializing and deserializing messages (which consist of raw bytes) but also "knowing" which types are being transmitted via which topics. If a producer is sending temperature sensor data on the topic `topic-1`, consumers of that topic will run into trouble if they attempt to parse that data as, say, moisture sensor readings.
-2. A "server-side" approach in which producers and consumers inform the system which data types can be transmitted via the topic. With this approach, the messaging system enforces type safety and ensures that producers and consumers remain synced.
-
-Both approaches are available in Pulsar, and you're free to adopt one or the other or to mix and match on a per-topic basis.
-
-1. For the "client-side" approach, producers and consumers can send and receive messages consisting of raw byte arrays and leave all type safety enforcement to the application on an "out-of-band" basis.
-1. For the "server-side" approach, Pulsar has a built-in **schema registry** that enables clients to upload data schemas on a per-topic basis. Those schemas dictate which data types are recognized as valid for that topic.
-
-#### Note
->
-> Currently, the Pulsar schema registry is only available for the [Java client](client-libraries-java.md), [CGo client](client-libraries-go.md), [Python client](client-libraries-python.md), and [C++ client](client-libraries-cpp).
-
-## Basic architecture
-
-Schemas are automatically uploaded when you create a typed Producer with a Schema. Additionally, Schemas can be manually uploaded to, fetched from, and updated via Pulsar's {@inject: rest:REST:tag/schemas} API.
-
-> #### Other schema registry backends
-> Out of the box, Pulsar uses the [Apache BookKeeper](concepts-architecture-overview#persistent-storage) log storage system for schema storage. You can, however, use different backends if you wish. Documentation for custom schema storage logic is coming soon.
-
-## How schemas work
-
-Pulsar schemas are applied and enforced *at the topic level* (schemas cannot be applied at the namespace or tenant level). Producers and consumers upload schemas to Pulsar brokers.
-
-Pulsar schemas are fairly simple data structures that consist of:
-
-* A **name**. In Pulsar, a schema's name is the topic to which the schema is applied.
-* A **payload**, which is a binary representation of the schema
-* A schema [**type**](#supported-schema-formats)
-* User-defined **properties** as a string/string map. Usage of properties is wholly application specific. Possible properties might be the Git hash associated with a schema, an environment like `dev` or `prod`, etc.
-
-## Schema versions
-
-In order to illustrate how schema versioning works, let's walk through an example. Imagine that the Pulsar [Java client](client-libraries-java) created using the code below attempts to connect to Pulsar and begin sending messages:
-
-```java
-
-PulsarClient client = PulsarClient.builder()
-        .serviceUrl("pulsar://localhost:6650")
-        .build();
-
-Producer<SensorReading> producer = client.newProducer(JSONSchema.of(SensorReading.class))
-        .topic("sensor-data")
-        .sendTimeout(3, TimeUnit.SECONDS)
-        .create();
-
-```
-
-The table below lists the possible scenarios when this connection attempt occurs and what will happen in light of each scenario:
-
-Scenario | What happens
-:--------|:------------
-No schema exists for the topic | The producer is created using the given schema. The schema is transmitted to the broker and stored (since no existing schema is "compatible" with the `SensorReading` schema). Any consumer created using the same schema/topic can consume messages from the `sensor-data` topic.
-A schema already exists; the producer connects using the same schema that's already stored | The schema is transmitted to the Pulsar broker. The broker determines that the schema is compatible. The broker attempts to store the schema in [BookKeeper](concepts-architecture-overview.md#persistent-storage) but then determines that it's already stored, so it's then used to tag produced messages.
-A schema already exists; the producer connects using a new schema that is compatible | The producer transmits the schema to the broker. The broker determines that the schema is compatible and stores the new schema as the current version (with a new version number).
-
-> Schemas are versioned in succession. Schema storage happens in the broker that handles the associated topic so that version assignments can be made. Once a version is assigned/fetched to/for a schema, all subsequent messages produced by that producer are tagged with the appropriate version.
-
-
-## Supported schema formats
-
-The following formats are supported by the Pulsar schema registry:
-
-* None. If no schema is specified for a topic, producers and consumers will handle raw bytes.
-* `String` (used for UTF-8-encoded strings)
-* [JSON](https://www.json.org/)
-* [Protobuf](https://developers.google.com/protocol-buffers/)
-* [Avro](https://avro.apache.org/)
-
-For usage instructions, see the documentation for your preferred client library:
-
-* [Java](client-libraries-java.md#schemas)
-
-> Support for other schema formats will be added in future releases of Pulsar.
-
-The following example shows how to define an Avro schema using the `GenericSchemaBuilder`, generate a generic Avro schema using `GenericRecordBuilder`, and consume messages into `GenericRecord`.
-
-**Example** 
-
-1. Use the `RecordSchemaBuilder` to build a schema.
-
-   ```java
-   
-   RecordSchemaBuilder recordSchemaBuilder = SchemaBuilder.record("schemaName");
-   recordSchemaBuilder.field("intField").type(SchemaType.INT32);
-   SchemaInfo schemaInfo = recordSchemaBuilder.build(SchemaType.AVRO);
-
-   Producer<GenericRecord> producer = client.newProducer(Schema.generic(schemaInfo)).create();
-   
-   ```
-
-2. Use `RecordBuilder` to build the generic records.
-
-   ```java
-   
-   producer.newMessage().value(schema.newRecordBuilder()
-               .set("intField", 32)
-               .build()).send();
-   
-   ```
-
-## Managing Schemas
-
-You can use Pulsar admin tools to manage schemas for topics.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-tiered-storage.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-tiered-storage.md
index 0b45b0a..f0bbde6 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-tiered-storage.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-tiered-storage.md
@@ -2,7 +2,6 @@
 id: concepts-tiered-storage
 title: Tiered Storage
 sidebar_label: "Tiered Storage"
-original_id: concepts-tiered-storage
 ---
 
 Pulsar's segment oriented architecture allows for topic backlogs to grow very large, effectively without limit. However, this can become expensive over time.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/concepts-topic-compaction.md b/site2/website-next/versioned_docs/version-2.2.0/concepts-topic-compaction.md
index c85e703..3356298 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/concepts-topic-compaction.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/concepts-topic-compaction.md
@@ -2,7 +2,6 @@
 id: concepts-topic-compaction
 title: Topic Compaction
 sidebar_label: "Topic Compaction"
-original_id: concepts-topic-compaction
 ---
 
 Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
diff --git a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-compaction.md b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-compaction.md
index 0a36233..f95f64c 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-compaction.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-compaction.md
@@ -2,7 +2,6 @@
 id: cookbooks-compaction
 title: Topic compaction
 sidebar_label: "Topic compaction"
-original_id: cookbooks-compaction
 ---
 
 Pulsar's [topic compaction](concepts-topic-compaction.md#compaction) feature enables you to create **compacted** topics in which older, "obscured" entries are pruned from the topic, allowing for faster reads through the topic's history (which messages are deemed obscured/outdated/irrelevant will depend on your use case).
diff --git a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-deduplication.md b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-deduplication.md
index 1669afa..307fe03 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-deduplication.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-deduplication.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-deduplication
 title: Message deduplication
-sidebar_label: "Message deduplication"
-original_id: cookbooks-deduplication
+sidebar_label: "Message deduplication "
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-encryption.md b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-encryption.md
index f0d8fb8..fbd1c97 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-encryption.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-encryption
 title: Pulsar Encryption
-sidebar_label: "Encryption"
-original_id: cookbooks-encryption
+sidebar_label: "Encryption "
 ---
 
 Pulsar encryption allows applications to encrypt messages at the producer and decrypt at the consumer. Encryption is performed using the public/private key pair configured by the application. Encrypted messages can only be decrypted by consumers with a valid key.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-message-queue.md b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-message-queue.md
index eb43cbd..9b93a94 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-message-queue.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-message-queue.md
@@ -2,7 +2,6 @@
 id: cookbooks-message-queue
 title: Using Pulsar as a message queue
 sidebar_label: "Message queue"
-original_id: cookbooks-message-queue
 ---
 
 Message queues are essential components of many large-scale data architectures. If every single work object that passes through your system absolutely *must* be processed in spite of the slowness or downright failure of this or that system component, there's a good chance that you'll need a message queue to step in and ensure that unprocessed data is retained---with correct ordering---until the required actions are taken.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-non-persistent.md b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-non-persistent.md
index 391569a..d40c4fb 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-non-persistent.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-non-persistent.md
@@ -2,7 +2,6 @@
 id: cookbooks-non-persistent
 title: Non-persistent messaging
 sidebar_label: "Non-persistent messaging"
-original_id: cookbooks-non-persistent
 ---
 
 **Non-persistent topics** are Pulsar topics in which message data is *never* [persistently stored](concepts-architecture-overview.md#persistent-storage) and kept only in memory. This cookbook provides:
diff --git a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-partitioned.md b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-partitioned.md
index 7882fb9..2589693 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-partitioned.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-partitioned.md
@@ -2,6 +2,5 @@
 id: cookbooks-partitioned
 title: Partitioned topics
 sidebar_label: "Partitioned Topics"
-original_id: cookbooks-partitioned
 ---
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-retention-expiry.md b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-retention-expiry.md
index b9353b5..738cf42 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/cookbooks-retention-expiry.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/cookbooks-retention-expiry.md
@@ -2,7 +2,6 @@
 id: cookbooks-retention-expiry
 title: Message retention and expiry
 sidebar_label: "Message retention and expiry"
-original_id: cookbooks-retention-expiry
 ---
 
 import Tabs from '@theme/Tabs';
@@ -36,7 +35,7 @@ By default, when a Pulsar message arrives at a broker, the message is stored unt
 
 Retention policies are useful when you use the Reader interface. The Reader interface does not use acknowledgements, and messages do not exist within backlogs. It is required to configure retention for Reader-only use cases.
 
-When you set a retention policy on topics in a namespace, you must set **both** a *size limit* and a *time limit*. You can refer to the following table to set retention policies in `pulsar-admin` and Java.
+When you set a retention policy on topics in a namespace, you must set **both** a *size limit* (via `defaultRetentionSizeInMB`) and a *time limit* (via `defaultRetentionTimeInMinutes`). You can refer to the following table to set retention policies in `pulsar-admin` and Java.
 
 |Time limit|Size limit| Message retention      |
 |----------|----------|------------------------|
@@ -152,7 +151,10 @@ admin.namespaces().setRetention(namespace, policies);
 
 You can fetch the retention policy for a namespace by specifying the namespace. The output will be a JSON object with two keys: `retentionTimeInMinutes` and `retentionSizeInMB`.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-retention`](reference-pulsar-admin.md#namespaces) subcommand and specify the namespace.
 
@@ -168,11 +170,13 @@ $ pulsar-admin namespaces get-retention my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/retention|operation/getRetention?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -180,15 +184,17 @@ admin.namespaces().getRetention(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Backlog quotas
 
 *Backlogs* are sets of unacknowledged messages for a topic that have been stored by bookies. Pulsar stores all unacknowledged messages in backlogs until they are processed and acknowledged.
 
-You can control the allowable size of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
+You can control the allowable size and/or time of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
 
-TODO: Expand on is this per backlog or per topic?
-
-* an allowable *size threshold* for each topic in the namespace
+* an allowable *size and/or time threshold* for each topic in the namespace
 * a *retention policy* that determines which action the [broker](reference-terminology.md#broker) takes if the threshold is exceeded.
 
 The following retention policies are available:
@@ -210,9 +216,12 @@ Backlog quotas are handled at the namespace level. They can be managed via:
 
 You can set a size and/or time threshold and backlog retention policy for all of the topics in a [namespace](reference-terminology.md#namespace) by specifying the namespace, a size limit and/or a time limit in second, and a policy by name.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, and a retention policy using the `-p`/`--policy` flag.
+Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, a time limit using the `-lt`/`--limitTime` flag, a retention policy using the `-p`/`--policy` flag, and a policy type using the `-t`/`--type` flag (default is destination_storage).
 
 ##### Example
 
@@ -220,16 +229,26 @@ Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand a
 
 $ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns \
   --limit 2G \
-  --limitTime 36000 \
   --policy producer_request_hold
 
 ```
 
-#### REST API
+```shell
+
+$ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns/my-topic \
+--limitTime 3600 \
+--policy producer_request_hold \
+--type message_age
+
+```
+
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -240,11 +259,18 @@ admin.namespaces().setBacklogQuota(namespace, quota);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get backlog threshold and backlog retention policy
 
 You can see which size threshold and backlog retention policy has been applied to a namespace.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-backlog-quotas`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-backlog-quotas) subcommand and specify a namespace. Here's an example:
 
@@ -260,11 +286,13 @@ $ pulsar-admin namespaces get-backlog-quotas my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/backlogQuotaMap|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -273,11 +301,18 @@ Map<BacklogQuota.BacklogQuotaType,BacklogQuota> quotas =
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove backlog quotas
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace. Here's an example:
+Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace, and use `-t`/`--type` to specify the backlog type to remove (default is destination_storage). Here's an example:
 
 ```shell
 
@@ -285,11 +320,13 @@ $ pulsar-admin namespaces remove-backlog-quota my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/removeBacklogQuota?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -297,6 +334,10 @@ admin.namespaces().removeBacklogQuota(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Clear backlog
 
 #### pulsar-admin
@@ -319,7 +360,10 @@ By default, Pulsar stores all unacknowledged messages forever. This can lead to
 
 ### Set the TTL for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`set-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-set-message-ttl) subcommand and specify a namespace and a TTL (in seconds) using the `-ttl`/`--messageTTL` flag.
 
@@ -332,11 +376,13 @@ $ pulsar-admin namespaces set-message-ttl my-tenant/my-ns \
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/setNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -344,9 +390,16 @@ admin.namespaces().setNamespaceMessageTTL(namespace, ttlInSeconds);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-message-ttl) subcommand and specify a namespace.
 
@@ -359,11 +412,13 @@ $ pulsar-admin namespaces get-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/getNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -371,9 +426,16 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`remove-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-message-ttl) subcommand and specify a namespace.
 
@@ -385,11 +447,13 @@ $ pulsar-admin namespaces remove-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/removeNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -397,6 +461,10 @@ admin.namespaces().removeNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Delete messages from namespaces
 
 If you do not have any retention period and that you never have much of a backlog, the upper limit for retaining messages, which are acknowledged, equals to the Pulsar segment rollover period + entry log rollover period + (garbage collection interval * garbage collection ratios).
diff --git a/site2/website-next/versioned_docs/version-2.2.0/deploy-kubernetes.md b/site2/website-next/versioned_docs/version-2.2.0/deploy-kubernetes.md
index dc7123d..4e170dc 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/deploy-kubernetes.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/deploy-kubernetes.md
@@ -2,7 +2,6 @@
 id: deploy-kubernetes
 title: Deploy Pulsar on Kubernetes
 sidebar_label: "Kubernetes"
-original_id: deploy-kubernetes
 ---
 
 To get up and running with these charts as fast as possible, in a **non-production** use case, we provide
diff --git a/site2/website-next/versioned_docs/version-2.2.0/deploy-monitoring.md b/site2/website-next/versioned_docs/version-2.2.0/deploy-monitoring.md
index 074ce3f..95ccdd6 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/deploy-monitoring.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/deploy-monitoring.md
@@ -2,7 +2,6 @@
 id: deploy-monitoring
 title: Monitor
 sidebar_label: "Monitor"
-original_id: deploy-monitoring
 ---
 
 You can use different ways to monitor a Pulsar cluster, exposing both metrics related to the usage of topics and the overall health of the individual components of the cluster.
@@ -127,17 +126,7 @@ The per-topic dashboard instructions are available at [Pulsar manager](administr
 
 You can use grafana to create dashboard driven by the data that is stored in Prometheus.
 
-When you deploy Pulsar on Kubernetes, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
-
-Enter the command below to use the dashboard manually:
-
-```shell
-
-docker run -p3000:3000 \
-        -e PROMETHEUS_URL=http://$PROMETHEUS_HOST:9090/ \
-        apachepulsar/pulsar-grafana:latest
-
-```
+When you deploy Pulsar on Kubernetes with the Pulsar Helm Chart, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
 
 The following are some Grafana dashboards examples:
 
@@ -145,4 +134,4 @@ The following are some Grafana dashboards examples:
 - [apache-pulsar-grafana-dashboard](https://github.com/streamnative/apache-pulsar-grafana-dashboard): a collection of Grafana dashboard templates for different Pulsar components running on both Kubernetes and on-premise machines.
 
 ## Alerting rules
-You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.0/developing-binary-protocol.md b/site2/website-next/versioned_docs/version-2.2.0/develop-binary-protocol.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.0/developing-binary-protocol.md
rename to site2/website-next/versioned_docs/version-2.2.0/develop-binary-protocol.md
diff --git a/site2/website-next/versioned_docs/version-2.2.0/developing-cpp.md b/site2/website-next/versioned_docs/version-2.2.0/develop-cpp.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.0/developing-cpp.md
rename to site2/website-next/versioned_docs/version-2.2.0/develop-cpp.md
diff --git a/site2/website-next/versioned_docs/version-2.2.0/developing-load-manager.md b/site2/website-next/versioned_docs/version-2.2.0/develop-load-manager.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.0/developing-load-manager.md
rename to site2/website-next/versioned_docs/version-2.2.0/develop-load-manager.md
diff --git a/site2/website-next/versioned_docs/version-2.2.0/developing-schema.md b/site2/website-next/versioned_docs/version-2.2.0/develop-schema.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.0/developing-schema.md
rename to site2/website-next/versioned_docs/version-2.2.0/develop-schema.md
diff --git a/site2/website-next/versioned_docs/version-2.2.0/develop-tools.md b/site2/website-next/versioned_docs/version-2.2.0/develop-tools.md
index b545779..d034926 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/develop-tools.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/develop-tools.md
@@ -2,7 +2,6 @@
 id: develop-tools
 title: Simulation tools
 sidebar_label: "Simulation tools"
-original_id: develop-tools
 ---
 
 It is sometimes necessary to create a test environment and incur artificial load to observe how well load managers
diff --git a/site2/website-next/versioned_docs/version-2.2.0/functions-metrics.md b/site2/website-next/versioned_docs/version-2.2.0/functions-metrics.md
index 8add669..e76c556 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/functions-metrics.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/functions-metrics.md
@@ -2,6 +2,5 @@
 id: functions-metrics
 title: Metrics for Pulsar Functions
 sidebar_label: "Metrics"
-original_id: functions-metrics
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/functions-state.md b/site2/website-next/versioned_docs/version-2.2.0/functions-state.md
index d3c7c78..a858759 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/functions-state.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/functions-state.md
@@ -2,196 +2,4 @@
 id: functions-state
 title: Pulsar Functions State Storage (Developer Preview)
 sidebar_label: "State Storage"
-original_id: functions-state
 ---
-
-Since Pulsar 2.1.0 release, Pulsar integrates with Apache BookKeeper [table service](https://docs.google.com/document/d/155xAwWv5IdOitHh1NVMEwCMGgB28M3FyMiQSxEpjE-Y/edit#heading=h.56rbh52koe3f)
-for storing the `State` for functions. For example, A `WordCount` function can store its `counters` state into BookKeeper's table service via Pulsar Functions [State API](#api).
-
-## API
-
-### Java API
-
-Currently Pulsar Functions expose following APIs for mutating and accessing State. These APIs are available in the [Context](functions-api.md#context) object when
-you are using [Java SDK](functions-api.md#java-sdk-functions) functions.
-
-#### incrCounter
-
-```java
-
-    /**
-     * Increment the builtin distributed counter referred by key
-     * @param key The name of the key
-     * @param amount The amount to be incremented
-     */
-    void incrCounter(String key, long amount);
-
-```
-
-The application can use `incrCounter` to change the counter of a given `key` by the given `amount`.
-
-#### incrCounterAsync
-
-```java
-
-     /**
-     * Increment the builtin distributed counter referred by key
-     * but dont wait for the completion of the increment operation
-     *
-     * @param key The name of the key
-     * @param amount The amount to be incremented
-     */
-    CompletableFuture<Void> incrCounterAsync(String key, long amount);
-
-```
-
-The application can use `incrCounterAsync` to asynchronously change the counter of a given `key` by the given `amount`.
-
-#### getCounter
-
-```java
-
-    /**
-     * Retrieve the counter value for the key.
-     *
-     * @param key name of the key
-     * @return the amount of the counter value for this key
-     */
-    long getCounter(String key);
-
-```
-
-The application can use `getCounter` to retrieve the counter of a given `key` mutated by `incrCounter`.
-
-Besides the `counter` API, Pulsar also exposes a general key/value API for functions to store
-general key/value state.
-
-#### getCounterAsync
-
-```java
-
-     /**
-     * Retrieve the counter value for the key, but don't wait
-     * for the operation to be completed
-     *
-     * @param key name of the key
-     * @return the amount of the counter value for this key
-     */
-    CompletableFuture<Long> getCounterAsync(String key);
-
-```
-
-The application can use `getCounterAsync` to asynchronously retrieve the counter of a given `key` mutated by `incrCounterAsync`.
-
-#### putState
-
-```java
-
-    /**
-     * Update the state value for the key.
-     *
-     * @param key name of the key
-     * @param value state value of the key
-     */
-    void putState(String key, ByteBuffer value);
-
-```
-
-#### putStateAsync
-
-```java
-
-    /**
-     * Update the state value for the key, but don't wait for the operation to be completed
-     *
-     * @param key name of the key
-     * @param value state value of the key
-     */
-    CompletableFuture<Void> putStateAsync(String key, ByteBuffer value);
-
-```
-
-The application can use `putStateAsync` to asynchronously update the state of a given `key`.
-
-#### getState
-
-```
-
-    /**
-     * Retrieve the state value for the key.
-     *
-     * @param key name of the key
-     * @return the state value for the key.
-     */
-    ByteBuffer getState(String key);
-
-```
-
-#### getStateAsync
-
-```java
-
-    /**
-     * Retrieve the state value for the key, but don't wait for the operation to be completed
-     *
-     * @param key name of the key
-     * @return the state value for the key.
-     */
-    CompletableFuture<ByteBuffer> getStateAsync(String key);
-
-```
-
-The application can use `getStateAsync` to asynchronously retrieve the state of a given `key`.
-
-### Python API
-
-State currently is not supported at [Python SDK](functions-api.md#python-sdk-functions).
-
-## Query State
-
-A Pulsar Function can use the [State API](#api) for storing state into Pulsar's state storage
-and retrieving state back from Pulsar's state storage. Additionally Pulsar also provides
-CLI commands for querying its state.
-
-```shell
-
-$ bin/pulsar-admin functions querystate \
-    --tenant <tenant> \
-    --namespace <namespace> \
-    --name <function-name> \
-    --state-storage-url <bookkeeper-service-url> \
-    --key <state-key> \
-    [---watch]
-
-```
-
-If `--watch` is specified, the CLI will watch the value of the provided `state-key`.
-
-## Example
-
-### Java Example
-
-{@inject: github:WordCountFunction:/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/WordCountFunction.java} is a very good example
-demonstrating on how Application can easily store `state` in Pulsar Functions.
-
-```java
-
-public class WordCountFunction implements Function<String, Void> {
-    @Override
-    public Void process(String input, Context context) throws Exception {
-        Arrays.asList(input.split("\\.")).forEach(word -> context.incrCounter(word, 1));
-        return null;
-    }
-}
-
-```
-
-The logic of this `WordCount` function is pretty simple and straightforward:
-
-1. The function first splits the received `String` into multiple words using regex `\\.`.
-2. For each `word`, the function increments the corresponding `counter` by 1 (via `incrCounter(key, amount)`).
-
-### Python Example
-
-State currently is not supported at [Python SDK](functions-api.md#python-sdk-functions).
-
diff --git a/site2/website-next/versioned_docs/version-2.2.0/io-connectors.md b/site2/website-next/versioned_docs/version-2.2.0/io-connectors.md
index 8db368e..9e8ede9 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/io-connectors.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/io-connectors.md
@@ -2,7 +2,6 @@
 id: io-connectors
 title: Built-in connector
 sidebar_label: "Built-in connector"
-original_id: io-connectors
 ---
 
 Pulsar distribution includes a set of common connectors that have been packaged and tested with the rest of Apache Pulsar. These connectors import and export data from some of the most commonly used data systems. 
@@ -46,6 +45,23 @@ Pulsar has various source connectors, which are sorted alphabetically as below.
 
 * [Java class](https://github.com/apache/pulsar/blob/master/pulsar-io/debezium/mongodb/src/main/java/org/apache/pulsar/io/debezium/mongodb/DebeziumMongoDbSource.java)
 
+### Debezium Oracle
+
+* [Configuration](io-debezium-source.md#configuration)
+
+* [Example](io-debezium-source.md#example-of-oracle)
+
+* [Java class](https://github.com/apache/pulsar/blob/master/pulsar-io/debezium/oracle/src/main/java/org/apache/pulsar/io/debezium/oracle/DebeziumOracleSource.java)
+
+### Debezium Microsoft SQL Server
+
+* [Configuration](io-debezium-source.md#configuration)
+
+* [Example](io-debezium-source.md#example-of-microsoft-sql)
+
+* [Java class](https://github.com/apache/pulsar/blob/master/pulsar-io/debezium/mssql/src/main/java/org/apache/pulsar/io/debezium/mssql/DebeziumMsSqlSource.java)
+
+  
 ### DynamoDB
 
 * [Configuration](io-dynamodb-source.md#configuration)
diff --git a/site2/website-next/versioned_docs/version-2.2.0/io-develop.md b/site2/website-next/versioned_docs/version-2.2.0/io-develop.md
index 2328f37..bbd6501 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/io-develop.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/io-develop.md
@@ -2,7 +2,6 @@
 id: io-develop
 title: How to develop Pulsar connectors
 sidebar_label: "Develop"
-original_id: io-develop
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.0/pulsar-2.0.md b/site2/website-next/versioned_docs/version-2.2.0/pulsar-2.0.md
index 11c5e66..560c8c1 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/pulsar-2.0.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/pulsar-2.0.md
@@ -2,7 +2,6 @@
 id: pulsar-2.0
 title: Pulsar 2.0
 sidebar_label: "Pulsar 2.0"
-original_id: pulsar-2.0
 ---
 
 Pulsar 2.0 is a major new release for Pulsar that brings some bold changes to the platform, including [simplified topic names](#topic-names), the addition of the [Pulsar Functions](functions-overview) feature, some terminology changes, and more.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/reference-pulsar-admin.md b/site2/website-next/versioned_docs/version-2.2.0/pulsar-admin.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.0/reference-pulsar-admin.md
rename to site2/website-next/versioned_docs/version-2.2.0/pulsar-admin.md
diff --git a/site2/website-next/versioned_docs/version-2.2.0/reference-cli-tools.md b/site2/website-next/versioned_docs/version-2.2.0/reference-cli-tools.md
index 3a46361..0c8aea1 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/reference-cli-tools.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/reference-cli-tools.md
@@ -2,7 +2,6 @@
 id: reference-cli-tools
 title: Pulsar command-line tools
 sidebar_label: "Pulsar CLI tools"
-original_id: reference-cli-tools
 ---
 
 Pulsar offers several command-line tools that you can use for managing Pulsar installations, performance testing, using command-line producers and consumers, and more.
@@ -16,8 +15,12 @@ All Pulsar command-line tools can be run from the `bin` directory of your [insta
 * [`bookkeeper`](#bookkeeper)
 * [`broker-tool`](#broker-tool)
 
-> ### Getting help
-> You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> **Important** 
+>
+> - This page only shows **some frequently used commands**. For the latest information about `pulsar`, `pulsar-client`, and `pulsar-perf`, including commands, flags, descriptions, and more information, see [Pulsar tools](https://pulsar.apache.org/tools/).
+>  
+> - You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> 
 
 > ```shell
 > 
@@ -45,7 +48,6 @@ Commands:
 * `bookie`
 * `broker`
 * `compact-topic`
-* `discovery`
 * `configuration-store`
 * `initialize-cluster-metadata`
 * `proxy`
@@ -53,6 +55,7 @@ Commands:
 * `websocket`
 * `zookeeper`
 * `zookeeper-shell`
+* `autorecovery`
 
 Example:
 
@@ -71,14 +74,13 @@ The table below lists the environment variables that you can use to configure th
 |`PULSAR_BOOKKEEPER_CONF`|description: Configuration file for bookie|`conf/bookkeeper.conf`|
 |`PULSAR_ZK_CONF`|Configuration file for zookeeper|`conf/zookeeper.conf`|
 |`PULSAR_CONFIGURATION_STORE_CONF`|Configuration file for the configuration store|`conf/global_zookeeper.conf`|
-|`PULSAR_DISCOVERY_CONF`|Configuration file for discovery service|`conf/discovery.conf`|
 |`PULSAR_WEBSOCKET_CONF`|Configuration file for websocket proxy|`conf/websocket.conf`|
 |`PULSAR_STANDALONE_CONF`|Configuration file for standalone|`conf/standalone.conf`|
 |`PULSAR_EXTRA_OPTS`|Extra options to be passed to the jvm||
 |`PULSAR_EXTRA_CLASSPATH`|Extra paths for Pulsar's classpath||
 |`PULSAR_PID_DIR`|Folder where the pulsar server PID file should be stored||
 |`PULSAR_STOP_TIMEOUT`|Wait time before forcefully killing the Bookie server instance if attempts to stop it are not successful||
-
+|`PULSAR_GC_LOG`|Gc options to be passed to the jvm||
 
 
 ### `bookie`
@@ -165,26 +167,6 @@ $ pulsar compact-topic --topic topic-to-compact
 
 ```
 
-### `discovery`
-
-Run a discovery server
-
-Usage
-
-```bash
-
-$ pulsar discovery
-
-```
-
-Example
-
-```bash
-
-$ PULSAR_DISCOVERY_CONF=/path/to/discovery.conf pulsar discovery
-
-```
-
 ### `configuration-store`
 
 Starts up the Pulsar configuration store
@@ -224,14 +206,14 @@ Options
 |`-ub` , `--broker-service-url`|The broker service URL for the new cluster||
 |`-tb` , `--broker-service-url-tls`|The broker service URL for the new cluster with TLS encryption||
 |`-c` , `--cluster`|Cluster name||
-|`-cs` , `--configuration-store`|The configuration store quorum connection string||
+|`-cms` , `--configuration-metadata-store`|The configuration metadata store quorum connection string||
 |`--existing-bk-metadata-service-uri`|The metadata service URI of the existing BookKeeper cluster that you want to use||
 |`-h` , `--help`|Cluster name|false|
 |`--initial-num-stream-storage-containers`|The number of storage containers of BookKeeper stream storage|16|
 |`--initial-num-transaction-coordinators`|The number of transaction coordinators assigned in a cluster|16|
 |`-uw` , `--web-service-url`|The web service URL for the new cluster||
 |`-tw` , `--web-service-url-tls`|The web service URL for the new cluster with TLS encryption||
-|`-zk` , `--zookeeper`|The local ZooKeeper quorum connection string||
+|`-md` , `--metadata-store`|The metadata store service url||
 |`--zookeeper-session-timeout-ms`|The local ZooKeeper session timeout. The time unit is in millisecond(ms)|30000|
 
 
@@ -355,6 +337,23 @@ Options
 |`-c`, `--conf`|Configuration file for ZooKeeper||
 |`-server`|Configuration zk address, eg: `127.0.0.1:2181`||
 
+### `autorecovery`
+
+Runs an auto-recovery service.
+
+Usage
+
+```bash
+
+$ pulsar autorecovery options
+
+```
+
+Options
+
+|Flag|Description|Default|
+|---|---|---|
+|`-c`, `--conf`|Configuration for the autorecovery|N/A|
 
 
 ## `pulsar-client`
@@ -407,6 +406,7 @@ Options
 |`-m`, `--messages`|Comma-separated string of messages to send; either -m or -f must be specified|[]|
 |`-n`, `--num-produce`|The number of times to send the message(s); the count of messages/files * num-produce should be below 1000|1|
 |`-r`, `--rate`|Rate (in messages per second) at which to produce; a value 0 means to produce messages as fast as possible|0.0|
+|`-db`, `--disable-batching`|Disable batch sending of messages|false|
 |`-c`, `--chunking`|Split the message and publish in chunks if the message size is larger than the allowed max size|false|
 |`-s`, `--separator`|Character to split messages string with.|","|
 |`-k`, `--key`|Message key to add|key=value string, like k1=v1,k2=v2.|
@@ -462,6 +462,7 @@ $ pulsar-daemon command
 Commands
 * `start`
 * `stop`
+* `restart`
 
 
 ### `start`
@@ -492,7 +493,14 @@ Options
 |---|---|---|
 |-force|Stop the service forcefully if not stopped by normal shutdown.|false|
 
+### `restart`
+Restart a service that has already been started.
 
+```bash
+
+$ pulsar-daemon restart service
+
+```
 
 ## `pulsar-perf`
 A tool for performance testing a Pulsar broker.
@@ -514,6 +522,7 @@ Commands
 * `monitor-brokers`
 * `simulation-client`
 * `simulation-controller`
+* `transaction`
 * `help`
 
 Environment variables
@@ -526,6 +535,7 @@ The table below lists the environment variables that you can use to configure th
 |`PULSAR_CLIENT_CONF`|Configuration file for the client|conf/client.conf|
 |`PULSAR_EXTRA_OPTS`|Extra options to be passed to the JVM||
 |`PULSAR_EXTRA_CLASSPATH`|Extra paths for Pulsar's classpath||
+|`PULSAR_GC_LOG`|Gc options to be passed to the jvm||
 
 
 ### `consume`
@@ -544,7 +554,7 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`-ac`, `--auto_ack_chunk_q_full`|Auto ack for the oldest message in consumer's receiver queue if the queue full|false|
 |`--listener-name`|Listener name for the broker||
 |`--acks-delay-millis`|Acknowledgements grouping delay in millis|100|
@@ -553,11 +563,13 @@ Options
 |`-v`, `--encryption-key-value-file`|The file which contains the private key to decrypt payload||
 |`-h`, `--help`|Help message|false|
 |`--conf-file`|Configuration file||
+|`-m`, `--num-messages`|Number of messages to consume in total. If the value is equal to or smaller than 0, it keeps consuming messages.|0|
 |`-e`, `--expire_time_incomplete_chunked_messages`|The expiration time for incomplete chunk messages (in milliseconds)|0|
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-mc`, `--max_chunked_msg`|Max pending chunk messages|0|
 |`-n`, `--num-consumers`|Number of consumers (per topic)|1|
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
+|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1|
 |`-ns`, `--num-subscriptions`|Number of subscriptions (per topic)|1|
 |`-t`, `--num-topics`|The number of topics|1|
 |`-pm`, `--pool-messages`|Use the pooled message|true|
@@ -571,10 +583,21 @@ Options
 |`-ss`, `--subscriptions`|A list of subscriptions to consume on (e.g. sub1,sub2)|sub|
 |`-st`, `--subscription-type`|Subscriber type. Possible values are Exclusive, Shared, Failover, Key_Shared.|Exclusive|
 |`-sp`, `--subscription-position`|Subscriber position. Possible values are Latest, Earliest.|Latest|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps consuming messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps consuming messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--tls-allow-insecure`|Allow insecure TLS connection||
 
+Below are **transaction** related options.
+
+If you want `--txn-timeout`, `--numMessage-perTransaction`, `-nmt`, `-ntxn`, or `-abort` to take effect, set `--txn-enable` to true.
+
+|Flag|Description|Default|
+|---|---|---|
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). |10
+`-nmt`, `--numMessage-perTransaction`|The number of messages acknowledged by a transaction. |50
+`-txn`, `--txn-enable`|Enable or disable a transaction.|false
+`-ntxn`|The number of opened transactions. 0 means the number of transactions is unlimited. |0
+`-abort`|Abort a transaction. |true
 
 ### `produce`
 Run a producer
@@ -594,7 +617,7 @@ Options
 |`-am`, `--access-mode`|Producer access mode. Valid values are `Shared`, `Exclusive` and `WaitForExclusive`|Shared|
 |`-au`, `--admin-url`|Pulsar admin URL||
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--listener-name`|Listener name for the broker||
 |`-b`, `--batch-time-window`|Batch messages in a window of the specified number of milliseconds|1|
 |`-bb`, `--batch-max-bytes`|Maximum number of bytes per batch|4194304|
@@ -613,9 +636,9 @@ Options
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-o`, `--max-outstanding`|Max number of outstanding messages|1000|
 |`-p`, `--max-outstanding-across-partitions`|Max number of outstanding messages across partitions|50000|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-mk`, `--message-key-generation-mode`|The generation mode of message key. Valid options are `autoIncrement`, `random`||
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages.|0|
 |`-n`, `--num-producers`|The number of producers (per topic)|1|
 |`-threads`, `--num-test-threads`|Number of test threads|1|
 |`-t`, `--num-topic`|The number of topics|1|
@@ -629,11 +652,21 @@ Options
 |`-u`, `--service-url`|Pulsar service URL||
 |`-s`, `--size`|Message size (in bytes)|1024|
 |`-i`, `--stats-interval-seconds`|Statistics interval seconds. If 0, statistics will be disabled.|0|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages.|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--warmup-time`|Warm-up time in seconds|1|
 |`--tls-allow-insecure`|Allow insecure TLS connection||
 
+Below are **transaction** related options.
+
+If you want `--txn-timeout`, `--numMessage-perTransaction`, or `-abort` to take effect, set `--txn-enable` to true.
+
+|Flag|Description|Default|
+|---|---|---|
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). |5
+`-nmt`, `--numMessage-perTransaction`|The number of messages acknowledged by a transaction. |50
+`-txn`, `--txn-enable`|Enable or disable a transaction.|true
+`-abort`|Abort a transaction. |true
 
 ### `read`
 Run a topic reader
@@ -651,19 +684,21 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--listener-name`|Listener name for the broker||
 |`--conf-file`|Configuration file||
 |`-h`, `--help`|Help message|false|
+|`-n`, `--num-messages`|Number of messages to consume in total. If the value is equal to or smaller than 0, it keeps consuming messages.|0|
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
+|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1|
 |`-t`, `--num-topics`|The number of topics|1|
 |`-r`, `--rate`|Simulate a slow message reader (rate in msg/s)|0|
 |`-q`, `--receiver-queue-size`|Size of the receiver queue|1000|
 |`-u`, `--service-url`|Pulsar service URL||
 |`-m`, `--start-message-id`|Start message id. This can be either 'earliest', 'latest' or a specific message id by using 'lid:eid'|earliest|
 |`-i`, `--stats-interval-seconds`|Statistics interval seconds. If 0, statistics will be disabled.|0|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps consuming messages.|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps consuming messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--use-tls`|Use TLS encryption on the connection|false|
 |`--tls-allow-insecure`|Allow insecure TLS connection||
@@ -684,16 +719,19 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--conf-file`|Configuration file||
 |`-h`, `--help`|Help message|false|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-t`, `--num-topic`|The number of topics|1|
 |`-f`, `--payload-file`|Use payload from a file instead of empty buffer||
+|`-e`, `--payload-delimiter`|The delimiter used to split lines when using payload from a file|\n|
+|`-fp`, `--format-payload`|Format %i as a message index in the stream from producer and/or %t as the timestamp nanoseconds|false|
+|`-fc`, `--format-class`|Custom formatter class name|`org.apache.pulsar.testclient.DefaultMessageFormatter`|
 |`-u`, `--proxy-url`|Pulsar Proxy URL, e.g., "ws://localhost:8080/"||
 |`-r`, `--rate`|Publish rate msg/s across topics|100|
 |`-s`, `--size`|Message size in byte|1024|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 
 
 ### `managed-ledger`
@@ -717,11 +755,11 @@ Options
 |`-h`, `--help`|Help message|false|
 |`-c`, `--max-connections`|Max number of TCP connections to a single bookie|1|
 |`-o`, `--max-outstanding`|Max number of outstanding requests|1000|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-t`, `--num-topic`|Number of managed ledgers|1|
 |`-r`, `--rate`|Write rate msg/s across managed ledgers|100|
 |`-s`, `--size`|Message size in byte|1024|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`--threads`|Number of threads writing|1|
 |`-w`, `--write-quorum`|Ledger write quorum|1|
 |`-zk`, `--zookeeperServers`|ZooKeeper connection string||
@@ -785,6 +823,45 @@ Options
 |`--cluster`|The cluster to test on||
 |`-h`, `--help`|Help message|false|
 
+### `transaction`
+
+Run a transaction. For more information, see [Pulsar transactions](txn-why).
+
+**Usage**
+
+```bash
+
+$ pulsar-perf transaction options
+
+```
+
+**Options**
+
+|Flag|Description|Default|
+|---|---|---|
+`-au`, `--admin-url`|Pulsar admin URL.|N/A
+`--conf-file`|Configuration file.|N/A
+`-h`, `--help`|Help messages.|N/A
+`-c`, `--max-connections`|Maximum number of TCP connections to a single broker.|100
+`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers. |1
+`-ns`, `--num-subscriptions`|Number of subscriptions per topic.|1
+`-threads`, `--num-test-threads`|Number of test threads. <br /><br />This thread is for a new transaction to ack messages from consumer topics, produce messages to producer topics, and commit or abort this transaction. <br /><br /> Increasing the number of threads increases the parallelism of the performance test, consequently, it increases the intensity of the stress test.|1
+`-nmc`, `--numMessage-perTransaction-consume`|Set the number of messages consumed in a transaction. <br /><br /> If transaction is disabled, it means the number of messages consumed in a task instead of in a transaction.|1
+`-nmp`, `--numMessage-perTransaction-produce`|Set the number of messages produced in a transaction. <br /><br />If transaction is disabled, it means the number of messages produced in a task instead of in a transaction.|1
+`-ntxn`, `--number-txn`|Set the number of transactions. <br /><br /> 0 means the number of transactions is unlimited. <br /><br /> If transaction is disabled, it means the number of tasks instead of transactions. |0
+`-np`, `--partitions`|Create partitioned topics with a given number of partitions. <br /><br /> 0 means not trying to create a topic.
+`-q`, `--receiver-queue-size`|Size of the receiver queue.|1000
+`-u`, `--service-url`|Pulsar service URL.|N/A
+`-sp`, `--subscription-position`|Subscription position.|Earliest
+`-st`, `--subscription-type`|Subscription type.|Shared
+`-ss`, `--subscriptions`|A list of subscriptions to consume. <br /><br /> For example, sub1,sub2.|[sub]
+`-time`, `--test-duration`|Test duration (in seconds). <br /><br /> 0 means it keeps publishing messages.|0
+`--topics-c`|All topics assigned to consumers.|[test-consume]
+`--topics-p`|All topics assigned to producers.|[test-produce]
+`--txn-disEnable`|Disable transaction.|true
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in seconds). <br /><br /> If you want `--txn-timeout` to take effect, set `--txn-disEnable` to false.|5
+`-abort`|Abort the transaction. <br /><br /> If you want `-abort` to take effect, set `--txn-disEnable` to false.|true
+`-txnRate`|Set the rate of opened transactions or tasks. <br /><br /> 0 means no limit.|0
 
 ### `help`
 This help message
@@ -829,9 +906,10 @@ The table below lists the environment variables that you can use to configure th
 |ENTRY_FORMATTER_CLASS|The Java class used to format entries||
 |BOOKIE_PID_DIR|Folder where the BookKeeper server PID file should be stored||
 |BOOKIE_STOP_TIMEOUT|Wait time before forcefully killing the Bookie server instance if attempts to stop it are not successful||
+|BOOKIE_GC_LOG|GC options to be passed to the JVM||
 
 
-### `auto-recovery`
+### `autorecovery`
 Runs an auto-recovery service
 
 Usage
diff --git a/site2/website-next/versioned_docs/version-2.2.0/reference-terminology.md b/site2/website-next/versioned_docs/version-2.2.0/reference-terminology.md
index d0e7368..ebc114d 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/reference-terminology.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/reference-terminology.md
@@ -2,7 +2,6 @@
 id: reference-terminology
 title: Pulsar Terminology
 sidebar_label: "Terminology"
-original_id: reference-terminology
 ---
 
 Here is a glossary of terms related to Apache Pulsar:
diff --git a/site2/website-next/versioned_docs/version-2.2.0/security-athenz.md b/site2/website-next/versioned_docs/version-2.2.0/security-athenz.md
index 947c3f4..ba27ba4 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/security-athenz.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/security-athenz.md
@@ -2,7 +2,6 @@
 id: security-athenz
 title: Authentication using Athenz
 sidebar_label: "Authentication using Athenz"
-original_id: security-athenz
 ---
 
[Athenz](https://github.com/AthenZ/athenz) is a role-based authentication/authorization system. In Pulsar, you can use Athenz role tokens (also known as *z-tokens*) to establish the identity of the client.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/security-authorization.md b/site2/website-next/versioned_docs/version-2.2.0/security-authorization.md
index e678587..b1003d2 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/security-authorization.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/security-authorization.md
@@ -2,7 +2,6 @@
 id: security-authorization
 title: Authentication and authorization in Pulsar
 sidebar_label: "Authorization and ACLs"
-original_id: security-authorization
 ---
 
 
diff --git a/site2/website-next/versioned_docs/version-2.2.0/security-encryption.md b/site2/website-next/versioned_docs/version-2.2.0/security-encryption.md
index cc43082..90d0dbe 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/security-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/security-encryption.md
@@ -2,7 +2,6 @@
 id: security-encryption
 title: Pulsar Encryption
 sidebar_label: "End-to-End Encryption"
-original_id: security-encryption
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.0/security-overview.md b/site2/website-next/versioned_docs/version-2.2.0/security-overview.md
index 82a289f..91f4ba8 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/security-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/security-overview.md
@@ -2,7 +2,6 @@
 id: security-overview
 title: Pulsar security overview
 sidebar_label: "Overview"
-original_id: security-overview
 ---
 
 As the central message bus for a business, Apache Pulsar is frequently used for storing mission-critical data. Therefore, enabling security features in Pulsar is crucial.
diff --git a/site2/website-next/versioned_docs/version-2.2.0/security-tls-authentication.md b/site2/website-next/versioned_docs/version-2.2.0/security-tls-authentication.md
index f3c32a0..e6921bd 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/security-tls-authentication.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/security-tls-authentication.md
@@ -2,7 +2,6 @@
 id: security-tls-authentication
 title: Authentication using TLS
 sidebar_label: "Authentication using TLS"
-original_id: security-tls-authentication
 ---
 
 ## TLS authentication overview
diff --git a/site2/website-next/versioned_docs/version-2.2.0/standalone-docker.md b/site2/website-next/versioned_docs/version-2.2.0/standalone-docker.md
index 05ac2a1..7ee20c2 100644
--- a/site2/website-next/versioned_docs/version-2.2.0/standalone-docker.md
+++ b/site2/website-next/versioned_docs/version-2.2.0/standalone-docker.md
@@ -2,14 +2,11 @@
 id: standalone-docker
 title: Set up a standalone Pulsar in Docker
 sidebar_label: "Run Pulsar in Docker"
-original_id: standalone-docker
 ---
 
-For local development and testing, you can run Pulsar in standalone
-mode on your own machine within a Docker container.
+For local development and testing, you can run Pulsar in standalone mode on your own machine within a Docker container. 
 
-If you have not installed Docker, download the [Community edition](https://www.docker.com/community-edition)
-and follow the instructions for your OS.
+If you have not installed Docker, download the [Community edition](https://www.docker.com/community-edition) and follow the instructions for your OS.
 
 ## Start Pulsar in Docker
 
@@ -17,13 +14,7 @@ and follow the instructions for your OS.
 
   ```shell
   
-  $ docker run -it \
-  -p 6650:6650 \
-  -p 8080:8080 \
-  --mount source=pulsardata,target=/pulsar/data \
-  --mount source=pulsarconf,target=/pulsar/conf \
-  apachepulsar/pulsar:@pulsar:version@ \
-  bin/pulsar standalone
+  $ docker run -it -p 6650:6650  -p 8080:8080 --mount source=pulsardata,target=/pulsar/data --mount source=pulsarconf,target=/pulsar/conf apachepulsar/pulsar:@pulsar:version@ bin/pulsar standalone
   
   ```
 
@@ -36,8 +27,9 @@ If you start Pulsar successfully, you will see `INFO`-level log messages like th
 
 ```
 
-2017-08-09 22:34:04,030 - INFO  - [main:WebService@213] - Web Service started at http://127.0.0.1:8080
-2017-08-09 22:34:04,038 - INFO  - [main:PulsarService@335] - messaging service is ready, bootstrap service on port=8080, broker url=pulsar://127.0.0.1:6650, cluster=standalone, configs=org.apache.pulsar.broker.ServiceConfiguration@4db60246
+08:18:30.970 [main] INFO  org.apache.pulsar.broker.web.WebService - HTTP Service started at http://0.0.0.0:8080
+...
+07:53:37.322 [main] INFO  org.apache.pulsar.broker.PulsarService - messaging service is ready, bootstrap service port = 8080, broker url= pulsar://localhost:6650, cluster=standalone, configs=org.apache.pulsar.broker.ServiceConfiguration@98b63c1
 ...
 
 ```
@@ -60,7 +52,7 @@ use one of these root URLs to interact with your cluster:
 * `pulsar://localhost:6650`
 * `http://localhost:8080`
 
-The following example will guide you get started with Pulsar quickly by using the [Python](client-libraries-python)
+The following example will guide you to get started with Pulsar quickly by using the [Python client API](client-libraries-python)
 client API.
 
 Install the Pulsar Python client library directly from [PyPI](https://pypi.org/project/pulsar-client/):
@@ -128,51 +120,93 @@ The output is something like this:
 ```json
 
 {
-  "averageMsgSize": 0.0,
-  "msgRateIn": 0.0,
-  "msgRateOut": 0.0,
-  "msgThroughputIn": 0.0,
-  "msgThroughputOut": 0.0,
-  "publishers": [
-    {
-      "address": "/172.17.0.1:35048",
-      "averageMsgSize": 0.0,
-      "clientVersion": "1.19.0-incubating",
-      "connectedSince": "2017-08-09 20:59:34.621+0000",
-      "msgRateIn": 0.0,
-      "msgThroughputIn": 0.0,
-      "producerId": 0,
-      "producerName": "standalone-0-1"
-    }
-  ],
-  "replication": {},
-  "storageSize": 16,
-  "subscriptions": {
-    "my-sub": {
-      "blockedSubscriptionOnUnackedMsgs": false,
-      "consumers": [
+    "msgRateIn": 0.0,
+    "msgThroughputIn": 0.0,
+    "msgRateOut": 1.8332950480217471,
+    "msgThroughputOut": 91.33142602871978,
+    "bytesInCounter": 7097,
+    "msgInCounter": 143,
+    "bytesOutCounter": 6607,
+    "msgOutCounter": 133,
+    "averageMsgSize": 0.0,
+    "msgChunkPublished": false,
+    "storageSize": 7097,
+    "backlogSize": 0,
+    "offloadedStorageSize": 0,
+    "publishers": [
         {
-          "address": "/172.17.0.1:35064",
-          "availablePermits": 996,
-          "blockedConsumerOnUnackedMsgs": false,
-          "clientVersion": "1.19.0-incubating",
-          "connectedSince": "2017-08-09 21:05:39.222+0000",
-          "consumerName": "166111",
-          "msgRateOut": 0.0,
-          "msgRateRedeliver": 0.0,
-          "msgThroughputOut": 0.0,
-          "unackedMessages": 0
+            "accessMode": "Shared",
+            "msgRateIn": 0.0,
+            "msgThroughputIn": 0.0,
+            "averageMsgSize": 0.0,
+            "chunkedMessageRate": 0.0,
+            "producerId": 0,
+            "metadata": {},
+            "address": "/127.0.0.1:35604",
+            "connectedSince": "2021-07-04T09:05:43.04788Z",
+            "clientVersion": "2.8.0",
+            "producerName": "standalone-2-5"
+        }
+    ],
+    "waitingPublishers": 0,
+    "subscriptions": {
+        "my-sub": {
+            "msgRateOut": 1.8332950480217471,
+            "msgThroughputOut": 91.33142602871978,
+            "bytesOutCounter": 6607,
+            "msgOutCounter": 133,
+            "msgRateRedeliver": 0.0,
+            "chunkedMessageRate": 0,
+            "msgBacklog": 0,
+            "backlogSize": 0,
+            "msgBacklogNoDelayed": 0,
+            "blockedSubscriptionOnUnackedMsgs": false,
+            "msgDelayed": 0,
+            "unackedMessages": 0,
+            "type": "Exclusive",
+            "activeConsumerName": "3c544f1daa",
+            "msgRateExpired": 0.0,
+            "totalMsgExpired": 0,
+            "lastExpireTimestamp": 0,
+            "lastConsumedFlowTimestamp": 1625389101290,
+            "lastConsumedTimestamp": 1625389546070,
+            "lastAckedTimestamp": 1625389546162,
+            "lastMarkDeleteAdvancedTimestamp": 1625389546163,
+            "consumers": [
+                {
+                    "msgRateOut": 1.8332950480217471,
+                    "msgThroughputOut": 91.33142602871978,
+                    "bytesOutCounter": 6607,
+                    "msgOutCounter": 133,
+                    "msgRateRedeliver": 0.0,
+                    "chunkedMessageRate": 0.0,
+                    "consumerName": "3c544f1daa",
+                    "availablePermits": 867,
+                    "unackedMessages": 0,
+                    "avgMessagesPerEntry": 6,
+                    "blockedConsumerOnUnackedMsgs": false,
+                    "lastAckedTimestamp": 1625389546162,
+                    "lastConsumedTimestamp": 1625389546070,
+                    "metadata": {},
+                    "address": "/127.0.0.1:35472",
+                    "connectedSince": "2021-07-04T08:58:21.287682Z",
+                    "clientVersion": "2.8.0"
+                }
+            ],
+            "isDurable": true,
+            "isReplicated": false,
+            "allowOutOfOrderDelivery": false,
+            "consumersAfterMarkDeletePosition": {},
+            "nonContiguousDeletedMessagesRanges": 0,
+            "nonContiguousDeletedMessagesRangesSerializedSize": 0,
+            "durable": true,
+            "replicated": false
         }
-      ],
-      "msgBacklog": 0,
-      "msgRateExpired": 0.0,
-      "msgRateOut": 0.0,
-      "msgRateRedeliver": 0.0,
-      "msgThroughputOut": 0.0,
-      "type": "Exclusive",
-      "unackedMessages": 0
-    }
-  }
+    },
+    "replication": {},
+    "deduplicationStatus": "Disabled",
+    "nonContiguousDeletedMessagesRanges": 0,
+    "nonContiguousDeletedMessagesRangesSerializedSize": 0
 }
 
 ```
diff --git a/site2/website-next/versioned_docs/version-2.2.0/getting-started-standalone.md b/site2/website-next/versioned_docs/version-2.2.0/standalone.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.0/getting-started-standalone.md
rename to site2/website-next/versioned_docs/version-2.2.0/standalone.md
diff --git a/site2/website-next/versioned_docs/version-2.2.1/adaptors-kafka.md b/site2/website-next/versioned_docs/version-2.2.1/adaptors-kafka.md
index ad0d886..27382e9 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/adaptors-kafka.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/adaptors-kafka.md
@@ -2,7 +2,6 @@
 id: adaptors-kafka
 title: Pulsar adaptor for Apache Kafka
 sidebar_label: "Kafka client wrapper"
-original_id: adaptors-kafka
 ---
 
 
@@ -261,6 +260,7 @@ You can configure Pulsar authentication provider directly from the Kafka propert
 | [`pulsar.producer.batching.enabled`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBatchingEnabled-boolean-) | `true` | Control whether automatic batching of messages is enabled for the producer. |
 | [`pulsar.producer.batching.max.messages`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBatchingMaxMessages-int-) | `1000` | The maximum number of messages in a batch. |
 | [`pulsar.block.if.producer.queue.full`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBlockIfQueueFull-boolean-) | | Specify the block producer if queue  is full. |
+| [`pulsar.crypto.reader.factory.class.name`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setCryptoKeyReader-org.apache.pulsar.client.api.CryptoKeyReader-) | | Specify the CryptoReader-Factory(`CryptoKeyReaderFactory`) classname which allows producer to create CryptoKeyReader. |
 
 
 ### Pulsar consumer Properties
@@ -272,3 +272,4 @@ You can configure Pulsar authentication provider directly from the Kafka propert
 | [`pulsar.consumer.acknowledgments.group.time.millis`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#acknowledgmentGroupTime-long-java.util.concurrent.TimeUnit-) | 100 | Set the maximum amount of group time for consumers to send the acknowledgments to the broker. |
 | [`pulsar.consumer.total.receiver.queue.size.across.partitions`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerConfiguration.html#setMaxTotalReceiverQueueSizeAcrossPartitions-int-) | 50000 | Set the maximum size of the total receiver queue across partitions. |
 | [`pulsar.consumer.subscription.topics.mode`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#subscriptionTopicsMode-Mode-) | PersistentOnly | Set the subscription topic mode for consumers. |
+| [`pulsar.crypto.reader.factory.class.name`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setCryptoKeyReader-org.apache.pulsar.client.api.CryptoKeyReader-) | | Specify the CryptoReader-Factory(`CryptoKeyReaderFactory`) classname which allows consumer to create CryptoKeyReader. |
diff --git a/site2/website-next/versioned_docs/version-2.2.1/adaptors-spark.md b/site2/website-next/versioned_docs/version-2.2.1/adaptors-spark.md
index e14f13b..afa5a7e 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/adaptors-spark.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/adaptors-spark.md
@@ -2,7 +2,6 @@
 id: adaptors-spark
 title: Pulsar adaptor for Apache Spark
 sidebar_label: "Apache Spark"
-original_id: adaptors-spark
 ---
 
 ## Spark Streaming receiver
diff --git a/site2/website-next/versioned_docs/version-2.2.1/adaptors-storm.md b/site2/website-next/versioned_docs/version-2.2.1/adaptors-storm.md
index 76d5071..9df9076 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/adaptors-storm.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/adaptors-storm.md
@@ -2,7 +2,6 @@
 id: adaptors-storm
 title: Pulsar adaptor for Apache Storm
 sidebar_label: "Apache Storm"
-original_id: adaptors-storm
 ---
 
 Pulsar Storm is an adaptor for integrating with [Apache Storm](http://storm.apache.org/) topologies. It provides core Storm implementations for sending and receiving data.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-brokers.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-brokers.md
index dbac453..10a90ca 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-brokers.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-brokers.md
@@ -2,7 +2,6 @@
 id: admin-api-brokers
 title: Managing Brokers
 sidebar_label: "Brokers"
-original_id: admin-api-brokers
 ---
 
 import Tabs from '@theme/Tabs';
@@ -26,9 +25,9 @@ Pulsar brokers consist of two components:
 
 [Brokers](reference-terminology.md#broker) can be managed via:
 
-* The [`brokers`](reference-pulsar-admin.md#brokers) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `brokers` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/brokers` endpoint of the admin {@inject: rest:REST:/} API
-* The `brokers` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin.html} object in the [Java API](client-libraries-java)
+* The `brokers` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 In addition to being configurable when you start them up, brokers can also be [dynamically configured](#dynamic-broker-configuration).
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-clusters.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-clusters.md
index 972c7e1..8687ae6 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-clusters.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-clusters.md
@@ -2,7 +2,6 @@
 id: admin-api-clusters
 title: Managing Clusters
 sidebar_label: "Clusters"
-original_id: admin-api-clusters
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -24,9 +23,9 @@ servers (aka [bookies](reference-terminology.md#bookie)), and a [ZooKeeper](http
 
 Clusters can be managed via:
 
-* The [`clusters`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `clusters` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/clusters` endpoint of the admin {@inject: rest:REST:/} API
-* The `clusters` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `clusters` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Clusters resources
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-namespaces.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-namespaces.md
index 216cb6f..c53fa3c 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-namespaces.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-namespaces.md
@@ -2,7 +2,6 @@
 id: admin-api-namespaces
 title: Managing Namespaces
 sidebar_label: "Namespaces"
-original_id: admin-api-namespaces
 ---
 
 import Tabs from '@theme/Tabs';
@@ -23,9 +22,9 @@ Pulsar [namespaces](reference-terminology.md#namespace) are logical groupings of
 
 Namespaces can be managed via:
 
-* The [`namespaces`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `namespaces` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/namespaces` endpoint of the admin {@inject: rest:REST:/} API
-* The `namespaces` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `namespaces` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Namespaces resources
 
@@ -49,8 +48,12 @@ $ pulsar-admin namespaces create test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|PUT|/admin/v2/namespaces/:tenant/:namespace|operation/createNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -105,8 +108,12 @@ $ pulsar-admin namespaces policies test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace|operation/getPolicies?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -142,8 +149,12 @@ test-tenant/ns2
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant|operation/getTenantNamespaces?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -177,8 +188,12 @@ $ pulsar-admin namespaces delete test-tenant/ns1
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace|operation/deleteNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -196,7 +211,7 @@ admin.namespaces().deleteNamespace(namespace);
 
 #### Set replication cluster
 
-It sets replication clusters for a namespace, so Pulsar can internally replicate publish message from one colo to another colo.
+You can set replication clusters for a namespace to enable Pulsar to internally replicate the published messages from one colocation facility to another.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -234,7 +249,7 @@ admin.namespaces().setNamespaceReplicationClusters(namespace, clusters);
 
 #### Get replication cluster
 
-It gives a list of replication clusters for a given namespace.
+You can get the list of replication clusters for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -281,13 +296,13 @@ admin.namespaces().getNamespaceReplicationClusters(namespace)
 
 Backlog quota helps the broker to restrict bandwidth/storage of a namespace once it reaches a certain threshold limit. Admin can set the limit and take corresponding action after the limit is reached.
 
-  1.  producer_request_hold: broker will hold and not persist produce request payload
+  1.  producer_request_hold: broker holds but does not persist the produce request payload
 
-  2.  producer_exception: broker disconnects with the client by giving an exception.
+  2.  producer_exception: broker disconnects with the client by giving an exception
 
-  3.  consumer_backlog_eviction: broker will start discarding backlog messages
+  3.  consumer_backlog_eviction: broker starts discarding backlog messages
 
-  Backlog quota restriction can be taken care by defining restriction of backlog-quota-type: destination_storage
+Backlog quota restriction can be taken care of by defining restriction of backlog-quota-type: destination_storage.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -300,12 +315,6 @@ $ pulsar-admin namespaces set-backlog-quota --limit 10G --limitTime 36000 --poli
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -330,7 +339,7 @@ admin.namespaces().setBacklogQuota(namespace, new BacklogQuota(limit, limitTime,
 
 #### Get backlog quota policies
 
-It shows a configured backlog quota for a given namespace.
+You can get a configured backlog quota for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -378,7 +387,7 @@ admin.namespaces().getBacklogQuotaMap(namespace);
 
 #### Remove backlog quota policies
 
-It removes backlog quota policies for a given namespace
+You can remove backlog quota policies for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -391,12 +400,6 @@ $ pulsar-admin namespaces remove-backlog-quota test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -423,7 +426,7 @@ admin.namespaces().removeBacklogQuota(namespace, backlogQuotaType)
 
 #### Set persistence policies
 
-Persistence policies allow to configure persistency-level for all topic messages under a given namespace.
+Persistence policies allow users to configure persistency-level for all topic messages under a given namespace.
 
   -   Bookkeeper-ack-quorum: Number of acks (guaranteed copies) to wait for each entry, default: 0
 
@@ -444,12 +447,6 @@ $ pulsar-admin namespaces set-persistence --bookkeeper-ack-quorum 2 --bookkeeper
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -474,7 +471,7 @@ admin.namespaces().setPersistence(namespace,new PersistencePolicies(bookkeeperEn
 
 #### Get persistence policies
 
-It shows the configured persistence policies of a given namespace.
+You can get the configured persistence policies of a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -537,12 +534,6 @@ $ pulsar-admin namespaces unload --bundle 0x00000000_0xffffffff test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -567,8 +558,7 @@ admin.namespaces().unloadNamespaceBundle(namespace, bundle)
 
 #### Split namespace bundles
 
-Each namespace bundle can contain multiple topics and each bundle can be served by only one broker. 
-If a single bundle is creating an excessive load on a broker, an admin splits the bundle using this command permitting one or more of the new bundles to be unloaded thus spreading the load across the brokers.
+One namespace bundle can contain multiple topics but can be served by only one broker. If a single bundle is creating an excessive load on a broker, an admin can split the bundle using the command below, permitting one or more of the new bundles to be unloaded, thus balancing the load across the brokers.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -581,12 +571,6 @@ $ pulsar-admin namespaces split-bundle --bundle 0x00000000_0xffffffff test-tenan
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -613,7 +597,7 @@ admin.namespaces().splitNamespaceBundle(namespace, bundle)
 
 #### Set message-ttl
 
-It configures message’s time to live (in seconds) duration.
+You can configure the time-to-live (TTL) duration (in seconds) for messages. In the example below, the message-ttl is set as 100s.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -626,12 +610,6 @@ $ pulsar-admin namespaces set-message-ttl --messageTTL 100 test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -656,7 +634,7 @@ admin.namespaces().setNamespaceMessageTTL(namespace, messageTTL)
 
 #### Get message-ttl
 
-It gives a message ttl of configured namespace.
+When the message-ttl for a namespace is set, you can use the command below to get the configured value. This example continues the example of the command `set message-ttl`, so the returned value is 100 (seconds).
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -684,6 +662,12 @@ $ pulsar-admin namespaces get-message-ttl test-tenant/ns1
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -693,6 +677,12 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 
 </Tabs>
@@ -712,12 +702,6 @@ $ pulsar-admin namespaces remove-message-ttl test-tenant/ns1
 
 ```
 
-```
-
-100
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -758,12 +742,6 @@ $ pulsar-admin namespaces clear-backlog --sub my-subscription test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -801,12 +779,6 @@ $ pulsar-admin namespaces clear-backlog  --bundle 0x00000000_0xffffffff  --sub m
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -842,13 +814,7 @@ Each namespace contains multiple topics and the retention size (storage size) of
 
 ```
 
-$ pulsar-admin set-retention --size 100 --time 10 test-tenant/ns1
-
-```
-
-```
-
-N/A
+$ pulsar-admin namespaces set-retention --size 100 --time 10 test-tenant/ns1
 
 ```
 
@@ -932,9 +898,7 @@ disables the throttling.
 :::note
 
 - If neither `clusterDispatchRate` nor `topicDispatchRate` is configured, dispatch throttling is disabled.
->
 - If `topicDispatchRate` is not configured, `clusterDispatchRate` takes effect.
-> 
 - If `topicDispatchRate` is configured, `topicDispatchRate` takes effect.
 
 :::
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-non-persistent-topics.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-non-persistent-topics.md
index 12220de..78dac35 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-non-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-non-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-non-persistent-topics
 title: Managing non-persistent topics
 sidebar_label: "Non-Persistent topics"
-original_id: admin-api-non-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-overview.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-overview.md
index 7936a9c..bd1e1f5 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-overview.md
@@ -2,7 +2,6 @@
 id: admin-api-overview
 title: Pulsar admin interface
 sidebar_label: "Overview"
-original_id: admin-api-overview
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-partitioned-topics.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-partitioned-topics.md
index 6734586..7221b3d 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-partitioned-topics.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-partitioned-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-partitioned-topics
 title: Managing partitioned topics
 sidebar_label: "Partitioned topics"
-original_id: admin-api-partitioned-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-permissions.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-permissions.md
index e2ca469..faedbf1 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-permissions.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-permissions.md
@@ -2,7 +2,6 @@
 id: admin-api-permissions
 title: Managing permissions
 sidebar_label: "Permissions"
-original_id: admin-api-permissions
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-schemas.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-schemas.md
index 9ffe21f..8399a03 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-schemas.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-schemas.md
@@ -2,6 +2,5 @@
 id: admin-api-schemas
 title: Managing Schemas
 sidebar_label: "Schemas"
-original_id: admin-api-schemas
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/admin-api-tenants.md b/site2/website-next/versioned_docs/version-2.2.1/admin-api-tenants.md
index fe68336..570ac31 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/admin-api-tenants.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/admin-api-tenants.md
@@ -2,7 +2,6 @@
 id: admin-api-tenants
 title: Managing Tenants
 sidebar_label: "Tenants"
-original_id: admin-api-tenants
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -80,22 +79,26 @@ $ pulsar-admin tenants create my-tenant
 
 ```
 
-When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
+When creating a tenant, you can optionally assign admin roles using the `-r`/`--admin-roles`
+flag, and clusters using the `-c`/`--allowed-clusters` flag. You can specify multiple values
+as a comma-separated list. Here are some examples:
 
 ```shell
 
 $ pulsar-admin tenants create my-tenant \
-  --admin-roles role1,role2,role3
+  --admin-roles role1,role2,role3 \
+  --allowed-clusters cluster1
 
 $ pulsar-admin tenants create my-tenant \
   -r role1
+  -c cluster1
 
 ```
 
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
+{@inject: endpoint|PUT|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -140,7 +143,7 @@ $ pulsar-admin tenants get my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|GET|/admin/v2/tenants/:cluster|operation/getTenant?version=@pulsar:version_number@}
+{@inject: endpoint|GET|/admin/v2/tenants/:tenant|operation/getTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -175,7 +178,7 @@ $ pulsar-admin tenants delete my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/deleteTenant?version=@pulsar:version_number@}
+{@inject: endpoint|DELETE|/admin/v2/tenants/:tenant|operation/deleteTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -210,7 +213,7 @@ $ pulsar-admin tenants update my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/updateTenant?version=@pulsar:version_number@}
+{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/updateTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
diff --git a/site2/website-next/versioned_docs/version-2.2.1/administration-dashboard.md b/site2/website-next/versioned_docs/version-2.2.1/administration-dashboard.md
index 514b076..1eb0404 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/administration-dashboard.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/administration-dashboard.md
@@ -7,7 +7,7 @@ original_id: administration-dashboard
 
 :::note
 
-Pulsar dashboard is deprecated. If you want to manage and monitor the stats of your topics, use [Pulsar Manager](administration-pulsar-manager). 
+Pulsar dashboard is deprecated. We recommend you use [Pulsar Manager](administration-pulsar-manager) to manage and monitor the stats of your topics. 
 
 :::
 
@@ -53,17 +53,17 @@ $ docker run -p 80:80 \
 ```
 
  
-You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the ip address or hostname of the machine running Pulsar standalone. The ip address or hostname should be accessible from the docker instance running dashboard.
+You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the IP address or hostname of the machine that runs Pulsar standalone. The IP address or hostname should be accessible from the dashboard running in the Docker instance.
 
-Once the Docker container runs, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
+Once the Docker container starts, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
 
-> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container
+> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container.
 
 If the Pulsar service runs in standalone mode in `localhost`, the `SERVICE_URL` has to
-be the IP of the machine.
+be the IP address of the machine.
 
 Similarly, given the Pulsar standalone advertises itself with localhost by default, you need to
-explicitly set the advertise address to the host IP. For example:
+explicitly set the advertise address to the host IP address. For example:
 
 ```shell
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/administration-geo.md b/site2/website-next/versioned_docs/version-2.2.1/administration-geo.md
index 9c93a64..d956817 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/administration-geo.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/administration-geo.md
@@ -2,9 +2,12 @@
 id: administration-geo
 title: Pulsar geo-replication
 sidebar_label: "Geo-replication"
-original_id: administration-geo
 ---
 
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
 *Geo-replication* is the replication of persistently stored message data across multiple clusters of a Pulsar instance.
 
 ## How geo-replication works
@@ -44,8 +47,6 @@ All messages produced in any of the three clusters are delivered to all subscrip
 
 ## Configure replication
 
-As stated in [Geo-replication and Pulsar properties](#geo-replication-and-pulsar-properties) section, geo-replication in Pulsar is managed at the [tenant](reference-terminology.md#tenant) level.
-
 The following example connects three clusters: **us-east**, **us-west**, and **us-cent**.
 
 ### Connect replication clusters
@@ -107,7 +108,11 @@ $ bin/pulsar-admin tenants create my-tenant \
 
 To update permissions of an existing tenant, use `update` instead of `create`.
 
-### Enable geo-replication namespaces
+### Enable geo-replication 
+
+You can enable geo-replication at **namespace** or **topic** level.
+
+#### Enable geo-replication at namespace level
 
 You can create a namespace with the following command sample.
 
@@ -126,11 +131,24 @@ $ bin/pulsar-admin namespaces set-clusters my-tenant/my-namespace \
 
 ```
 
-You can change the replication clusters for a namespace at any time, without disruption to ongoing traffic. Replication channels are immediately set up or stopped in all clusters as soon as the configuration changes.
+#### Enable geo-replication at topic level
 
-### Use topics with geo-replication
+You can set geo-replication at topic level using the command `pulsar-admin topics set-replication-clusters`. For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+
+```shell
+
+$ bin/pulsar-admin topics set-replication-clusters --clusters us-west,us-east,us-cent my-tenant/my-namespace/my-topic
+
+```
 
-Once you create a geo-replication namespace, any topics that producers or consumers create within that namespace is replicated across clusters. Typically, each application uses the `serviceUrl` for the local cluster.
+:::tip
+
+- You can change the replication clusters for a namespace at any time, without disruption to ongoing traffic. Replication channels are immediately set up or stopped in all clusters as soon as the configuration changes.
+- Once you create a geo-replication namespace, any topics that producers or consumers create within that namespace are replicated across clusters. Typically, each application uses the `serviceUrl` for the local cluster.
+
+:::
+
+### Use topics with geo-replication
 
 #### Selective replication
 
@@ -158,14 +176,30 @@ producer.newMessage()
 
 #### Topic stats
 
-Topic-specific statistics for geo-replication topics are available via the [`pulsar-admin`](reference-pulsar-admin) tool and {@inject: rest:REST:/} API:
+You can check topic-specific statistics for geo-replication topics using one of the following methods.
+
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"}]}>
+<TabItem value="pulsar-admin">
+
+Use the [`pulsar-admin topics stats`](https://pulsar.apache.org/tools/pulsar-admin/) command.
 
 ```shell
 
-$ bin/pulsar-admin persistent stats persistent://my-tenant/my-namespace/my-topic
+$ bin/pulsar-admin topics stats persistent://my-tenant/my-namespace/my-topic
 
 ```
 
+</TabItem>
+<TabItem value="REST API">
+
+{@inject: endpoint|GET|/admin/v2/:schema/:tenant/:namespace/:topic/stats|operation/getStats?version=@pulsar:version_number@}
+
+</TabItem>
+
+</Tabs>
+
 Each cluster reports its own local stats, including the incoming and outgoing replication rates and backlogs.
 
 #### Delete a geo-replication topic
@@ -212,4 +246,4 @@ Consumer<String> consumer = client.newConsumer(Schema.STRING)
 ### Limitations
 
 * When you enable replicated subscription, you're creating a consistent distributed snapshot to establish an association between message ids from different clusters. The snapshots are taken periodically. The default value is `1 second`. It means that a consumer failing over to a different cluster can potentially receive 1 second of duplicates. You can also configure the frequency of the snapshot in the `broker.conf` file.
-* Only the base line cursor position is synced in replicated subscriptions while the individual acknowledgments are not synced. This means the messages acknowledged out-of-order could end up getting delivered again, in the case of a cluster failover.
+* Only the base line cursor position is synced in replicated subscriptions while the individual acknowledgments are not synced. This means the messages acknowledged out-of-order could end up getting delivered again, in the case of a cluster failover.
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.1/administration-proxy.md b/site2/website-next/versioned_docs/version-2.2.1/administration-proxy.md
index c046ed3..3cef937 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/administration-proxy.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/administration-proxy.md
@@ -2,10 +2,9 @@
 id: administration-proxy
 title: Pulsar proxy
 sidebar_label: "Pulsar proxy"
-original_id: administration-proxy
 ---
 
-Pulsar proxy is an optional gateway. Pulsar proxy is used when direction connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
+Pulsar proxy is an optional gateway. Pulsar proxy is used when direct connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
 
 ## Configure the proxy
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/administration-stats.md b/site2/website-next/versioned_docs/version-2.2.1/administration-stats.md
index ac0c036..2ccd73c 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/administration-stats.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/administration-stats.md
@@ -2,7 +2,6 @@
 id: administration-stats
 title: Pulsar stats
 sidebar_label: "Pulsar statistics"
-original_id: administration-stats
 ---
 
 ## Partitioned topics
diff --git a/site2/website-next/versioned_docs/version-2.2.1/administration-zk-bk.md b/site2/website-next/versioned_docs/version-2.2.1/administration-zk-bk.md
index de10d50..e5f9688 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/administration-zk-bk.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/administration-zk-bk.md
@@ -2,7 +2,6 @@
 id: administration-zk-bk
 title: ZooKeeper and BookKeeper administration
 sidebar_label: "ZooKeeper and BookKeeper"
-original_id: administration-zk-bk
 ---
 
 Pulsar relies on two external systems for essential tasks:
diff --git a/site2/website-next/versioned_docs/version-2.2.1/client-libraries-cpp.md b/site2/website-next/versioned_docs/version-2.2.1/client-libraries-cpp.md
index 333ec67..958861a 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/client-libraries-cpp.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/client-libraries-cpp.md
@@ -2,7 +2,6 @@
 id: client-libraries-cpp
 title: Pulsar C++ client
 sidebar_label: "C++"
-original_id: client-libraries-cpp
 ---
 
 You can use Pulsar C++ client to create Pulsar producers and consumers in C++.
@@ -11,7 +10,7 @@ All the methods in producer, consumer, and reader of a C++ client are thread-saf
 
 ## Supported platforms
 
-Pulsar C++ client is supported on **Linux** and **MacOS** platforms.
+Pulsar C++ client is supported on **Linux**, **MacOS**, and **Windows** platforms.
 
 [Doxygen](http://www.doxygen.nl/)-generated API docs for the C++ client are available [here](/api/cpp).
 
@@ -21,8 +20,8 @@ You need to install the following components before using the C++ client:
 
 * [CMake](https://cmake.org/)
 * [Boost](http://www.boost.org/)
-* [Protocol Buffers](https://developers.google.com/protocol-buffers/) 2.6
-* [libcurl](https://curl.haxx.se/libcurl/)
+* [Protocol Buffers](https://developers.google.com/protocol-buffers/) >= 3
+* [libcurl](https://curl.se/libcurl/)
 * [Google Test](https://github.com/google/googletest)
 
 ## Linux
@@ -147,6 +146,12 @@ $ rpm -ivh apache-pulsar-client*.rpm
 
 After you install RPM successfully, Pulsar libraries are in the `/usr/lib` directory.
 
+:::note
+
+If you get the error that `libpulsar.so: cannot open shared object file: No such file or directory` when starting Pulsar client, you may need to run `ldconfig` first.
+
+:::
+
 ### Install Debian
 
 1. Download a Debian package from the links in the table. 
@@ -236,10 +241,8 @@ $ export OPENSSL_INCLUDE_DIR=/usr/local/opt/openssl/include/
 $ export OPENSSL_ROOT_DIR=/usr/local/opt/openssl/
 
 # Protocol Buffers installation
-$ brew tap homebrew/versions
-$ brew install protobuf260
-$ brew install boost
-$ brew install log4cxx
+$ brew install protobuf boost boost-python log4cxx
+# If you are using python3, you need to install boost-python3 
 
 # Google Test installation
 $ git clone https://github.com/google/googletest.git
@@ -269,6 +272,50 @@ brew install libpulsar
 
 ```
 
+## Windows (64-bit)
+
+### Compilation
+
+1. Clone the Pulsar repository.
+
+```shell
+
+$ git clone https://github.com/apache/pulsar
+
+```
+
+2. Install all necessary dependencies.
+
+```shell
+
+cd ${PULSAR_HOME}/pulsar-client-cpp
+vcpkg install --feature-flags=manifests --triplet x64-windows
+
+```
+
+3. Build C++ libraries.
+
+```shell
+
+cmake -B ./build -A x64 -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=OFF -DVCPKG_TRIPLET=x64-windows -DCMAKE_BUILD_TYPE=Release -S .
+cmake --build ./build --config Release
+
+```
+
+> **NOTE**
+>
+> 1. For Windows 32-bit, you need to use `-A Win32` and `-DVCPKG_TRIPLET=x86-windows`.
+> 2. For MSVC Debug mode, you need to replace `Release` with `Debug` for both `CMAKE_BUILD_TYPE` variable and `--config` option.
+
+4. Client libraries are available in the following places.
+
+```
+
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.lib
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.dll
+
+```
+
 ## Connection URLs
 
 To connect Pulsar using client libraries, you need to specify a Pulsar protocol URL.
@@ -299,109 +346,361 @@ pulsar+ssl://pulsar.us-west.example.com:6651
 
 ## Create a consumer
 
-To use Pulsar as a consumer, you need to create a consumer on the C++ client. The following is an example. 
+To use Pulsar as a consumer, you need to create a consumer on the C++ client. There are two main ways of using the consumer:
+- [Blocking style](#blocking-example): synchronously calling `receive(msg)`.
+- [Non-blocking](#consumer-with-a-message-listener) (event based) style: using a message listener.
+
+### Blocking example
+
+The benefit of this approach is that it is the simplest code. Simply keep calling `receive(msg)`, which blocks until a message is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
 
-Consumer consumer;
-Result result = client.subscribe("my-topic", "my-subscription-name", consumer);
-if (result != ResultOk) {
-    LOG_ERROR("Failed to subscribe: " << result);
-    return -1;
+    Message msg;
+    int ctr = 0;
+    // consume 100 messages
+    while (ctr < 100) {
+        consumer.receive(msg);
+        std::cout << "Received: " << msg
+            << "  with payload '" << msg.getDataAsString() << "'" << std::endl;
+
+        consumer.acknowledge(msg);
+        ctr++;
+    }
+
+    std::cout << "Finished consuming synchronously!" << std::endl;
+
+    client.close();
+    return 0;
 }
 
-Message msg;
+```
+
+### Consumer with a message listener
+
+You can avoid running a loop with blocking calls by using an event-based style with a message listener, which is invoked for each message that is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
-while (true) {
-    consumer.receive(msg);
-    LOG_INFO("Received: " << msg
-            << "  with payload '" << msg.getDataAsString() << "'");
+```c++
+
+#include <pulsar/Client.h>
+#include <atomic>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> messagesReceived;
+
+void handleAckComplete(Result res) {
+    std::cout << "Ack res: " << res << std::endl;
+}
 
-    consumer.acknowledge(msg);
+void listener(Consumer consumer, const Message& msg) {
+    std::cout << "Got message " << msg << " with content '" << msg.getDataAsString() << "'" << std::endl;
+    messagesReceived++;
+    consumer.acknowledgeAsync(msg.getMessageId(), handleAckComplete);
 }
 
-client.close();
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setMessageListener(listener);
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
+
+    // wait for 100 messages to be consumed
+    while (messagesReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished consuming asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
 ## Create a producer
 
-To use Pulsar as a producer, you need to create a producer on the C++ client. The following is an example. 
+To use Pulsar as a producer, you need to create a producer on the C++ client. There are two main ways of using a producer:
+- [Blocking style](#simple-blocking-example) : each call to `send` waits for an ack from the broker.
+- [Non-blocking asynchronous style](#non-blocking-example) : `sendAsync` is called instead of `send` and a callback is supplied for when the ack is received from the broker.
+
+### Simple blocking example
+
+This example sends 100 messages using the blocking style. While simple, it does not produce high throughput as it waits for each ack to come back before sending the next message.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+#include <thread>
 
-Producer producer;
-Result result = client.createProducer("my-topic", producer);
-if (result != ResultOk) {
-    LOG_ERROR("Error creating producer: " << result);
-    return -1;
-}
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+    Producer producer;
+    Result result = client.createProducer("persistent://public/default/my-topic", producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages synchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        Result result = producer.send(msg);
+        if (result != ResultOk) {
+            std::cout << "The message " << content << " could not be sent, received code: " << result << std::endl;
+        } else {
+            std::cout << "The message " << content << " sent successfully" << std::endl;
+        }
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    std::cout << "Finished producing synchronously!" << std::endl;
 
-// Publish 10 messages to the topic
-for (int i = 0; i < 10; i++){
-    Message msg = MessageBuilder().setContent("my-message").build();
-    Result res = producer.send(msg);
-    LOG_INFO("Message sent: " << res);
+    client.close();
+    return 0;
 }
-client.close();
 
 ```
 
-## Enable authentication in connection URLs
-If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
+### Non-blocking example
 
-```cpp
+This example sends 100 messages using the non-blocking style, calling `sendAsync` instead of `send`. This allows the producer to have multiple messages in flight at a time, which increases throughput.
 
-ClientConfiguration config = ClientConfiguration();
-config.setUseTls(true);
-config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
-config.setTlsAllowInsecureConnection(false);
-config.setAuth(pulsar::AuthTls::create(
-            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+The producer configuration `blockIfQueueFull` is useful here to avoid `ResultProducerQueueIsFull` errors when the internal queue for outgoing send requests becomes full. Once the internal queue is full, `sendAsync` becomes blocking which can make your code simpler.
 
-Client client("pulsar+ssl://my-broker.com:6651", config);
+Without this configuration, the result code `ResultProducerQueueIsFull` is passed to the callback. You must decide how to deal with that (retry, discard, etc.).
+
+```c++
+
+#include <pulsar/Client.h>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> acksReceived;
+
+void callback(Result code, const MessageId& msgId, std::string msgContent) {
+    // message processing logic here
+    std::cout << "Received ack for msg: " << msgContent << " with code: "
+        << code << " -- MsgID: " << msgId << std::endl;
+    acksReceived++;
+}
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    ProducerConfiguration producerConf;
+    producerConf.setBlockIfQueueFull(true);
+    Producer producer;
+    Result result = client.createProducer("persistent://public/default/my-topic",
+                                          producerConf, producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages asynchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        producer.sendAsync(msg, std::bind(callback,
+                                          std::placeholders::_1, std::placeholders::_2, content));
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    // wait for 100 messages to be acked
+    while (acksReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished producing asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
-For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+### Partitioned topics and lazy producers
 
-## Schema
+When scaling out a Pulsar topic, you may configure a topic to have hundreds of partitions. Likewise, you may have also scaled out your producers so there are hundreds or even thousands of producers. This can put some strain on the Pulsar brokers as when you create a producer on a partitioned topic, internally it creates one internal producer per partition which involves communications to the brokers for each one. So for a topic with 1000 partitions and 1000 producers, it ends up creating 1,000,000 internal producers across the producer applications, each of which has to communicate with the brokers at startup.
 
-This section describes some examples about schema. For more information about schema, see [Pulsar schema](schema-get-started).
+You can reduce the load caused by this combination of a large number of partitions and many producers by doing the following:
+- use SinglePartition partition routing mode (this ensures that all messages are only sent to a single, randomly selected partition)
+- use non-keyed messages (when messages are keyed, routing is based on the hash of the key and so messages will end up being sent to multiple partitions)
+- use lazy producers (this ensures that an internal producer is only created on demand when a message needs to be routed to a partition)
 
-### Create producer with Avro schema
+With our example above, that reduces the number of internal producers spread out over the 1000 producer apps from 1,000,000 to just 1000.
 
-The following example shows how to create a producer with an Avro schema.
+Note that there can be extra latency for the first message sent. If you set a low send timeout, this timeout could be reached if the initial connection handshake is slow to complete.
 
-```cpp
+```c++
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-Producer producer;
 ProducerConfiguration producerConf;
-producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.createProducer("topic-avro", producerConf, producer);
+producerConf.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition);
+producerConf.setLazyStartPartitionedProducers(true);
 
 ```
 
-### Create consumer with Avro schema
-
-The following example shows how to create a consumer with an Avro schema.
+## Enable authentication in connection URLs
+If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
 
 ```cpp
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-ConsumerConfiguration consumerConf;
-Consumer consumer;
-consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+ClientConfiguration config = ClientConfiguration();
+config.setUseTls(true);
+config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
+config.setTlsAllowInsecureConnection(false);
+config.setAuth(pulsar::AuthTls::create(
+            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+
+Client client("pulsar+ssl://my-broker.com:6651", config);
 
 ```
 
+For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+
+## Schema
+
+This section describes some examples about schema. For more information about
+schema, see [Pulsar schema](schema-get-started).
+
+### Avro schema
+
+- The following example shows how to create a producer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  Producer producer;
+  ProducerConfiguration producerConf;
+  producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.createProducer("topic-avro", producerConf, producer);
+  
+  ```
+
+- The following example shows how to create a consumer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  ConsumerConfiguration consumerConf;
+  Consumer consumer;
+  consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+  
+  ```
+
+### ProtobufNative schema
+
+The following example shows how to create a producer and a consumer with a ProtobufNative schema.
+​
+1. Generate the `User` class using Protobuf3. 
+
+   :::note
+
+   You need to use Protobuf3 or later versions.
+
+   :::
+
+​
+
+   ```protobuf
+   
+   syntax = "proto3";
+   
+   message User {
+       string name = 1;
+       int32 age = 2;
+   }
+   
+   ```
+
+​
+2. Include the `ProtobufNativeSchema.h` in your source code. Ensure the Protobuf dependency has been added to your project.
+​
+
+   ```c++
+   
+   #include <pulsar/ProtobufNativeSchema.h>
+   
+   ```
+
+​
+3. Create a producer to send a `User` instance.
+​
+
+   ```c++
+   
+   ProducerConfiguration producerConf;
+   producerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   Producer producer;
+   client.createProducer("topic-protobuf", producerConf, producer);
+   User user;
+   user.set_name("my-name");
+   user.set_age(10);
+   std::string content;
+   user.SerializeToString(&content);
+   producer.send(MessageBuilder().setContent(content).build());
+   
+   ```
+
+​
+4. Create a consumer to receive a `User` instance.
+​
+
+   ```c++
+   
+   ConsumerConfiguration consumerConf;
+   consumerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   consumerConf.setSubscriptionInitialPosition(InitialPositionEarliest);
+   Consumer consumer;
+   client.subscribe("topic-protobuf", "my-sub", consumerConf, consumer);
+   Message msg;
+   consumer.receive(msg);
+   User user2;
+   user2.ParseFromArray(msg.getData(), msg.getLength());
+   
+   ```
+
diff --git a/site2/website-next/versioned_docs/version-2.2.1/client-libraries-python.md b/site2/website-next/versioned_docs/version-2.2.1/client-libraries-python.md
index 7ebc6be..e28c5e9 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/client-libraries-python.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/client-libraries-python.md
@@ -2,10 +2,13 @@
 id: client-libraries-python
 title: Pulsar Python client
 sidebar_label: "Python"
-original_id: client-libraries-python
 ---
 
-Pulsar Python client library is a wrapper over the existing [C++ client library](client-libraries-cpp) and exposes all of the [same features](/api/cpp). You can find the code in the [`python` subdirectory](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/python) of the C++ client code.
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+Pulsar Python client library is a wrapper over the existing [C++ client library](client-libraries-cpp) and exposes all of the [same features](/api/cpp). You can find the code in the [Python directory](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/python) of the C++ client code.
 
 All the methods in producer, consumer, and reader of a Python client are thread-safe.
 
@@ -13,7 +16,7 @@ All the methods in producer, consumer, and reader of a Python client are thread-
 
 ## Install
 
-You can install the [`pulsar-client`](https://pypi.python.org/pypi/pulsar-client) library either via [PyPi](https://pypi.python.org/pypi), using [pip](#installation-using-pip), or by building the library from source.
+You can install the [`pulsar-client`](https://pypi.python.org/pypi/pulsar-client) library either via [PyPi](https://pypi.python.org/pypi), using [pip](#installation-using-pip), or by building the library from [source](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp).
 
 ### Install using pip
 
@@ -26,8 +29,7 @@ $ pip install pulsar-client==@pulsar:version_number@
 ```
 
 ### Optional dependencies
-
-To support aspects like pulsar functions or Avro serialization, additional optional components can be installed alongside the  `pulsar-client` library
+If you install the client libraries on Linux to support services like Pulsar functions or Avro serialization, you can install optional components alongside the `pulsar-client` library.
 
 ```shell
 
@@ -69,7 +71,7 @@ The complete Python API reference is available at [api/python](/api/python).
 
 ## Examples
 
-You can find a variety of Python code examples for the `pulsar-client` library.
+You can find a variety of Python code examples for the [pulsar-client](/pulsar-client-cpp/python) library.
 
 ### Producer example
 
@@ -167,7 +169,7 @@ while True:
 
 In addition to subscribing a consumer to a single Pulsar topic, you can also subscribe to multiple topics simultaneously. To use multi-topic subscriptions, you can supply a regular expression (regex) or a `List` of topics. If you select topics via regex, all topics must be within the same Pulsar namespace.
 
-The following is an example. 
+The following is an example: 
 
 ```python
 
@@ -188,67 +190,6 @@ client.close()
 
 ## Schema
 
-### Declare and validate schema
-
-You can declare a schema by passing a class that inherits
-from `pulsar.schema.Record` and defines the fields as
-class variables. For example:
-
-```python
-
-from pulsar.schema import *
-
-class Example(Record):
-    a = String()
-    b = Integer()
-    c = Boolean()
-
-```
-
-With this simple schema definition, you can create producers, consumers and readers instances that refer to that.
-
-```python
-
-producer = client.create_producer(
-                    topic='my-topic',
-                    schema=AvroSchema(Example) )
-
-producer.send(Example(a='Hello', b=1))
-
-```
-
-After creating the producer, the Pulsar broker validates that the existing topic schema is indeed of "Avro" type and that the format is compatible with the schema definition of the `Example` class.
-
-If there is a mismatch, an exception occurs in the producer creation.
-
-Once a producer is created with a certain schema definition,
-it will only accept objects that are instances of the declared
-schema class.
-
-Similarly, for a consumer/reader, the consumer will return an
-object, instance of the schema record class, rather than the raw
-bytes:
-
-```python
-
-consumer = client.subscribe(
-                  topic='my-topic',
-                  subscription_name='my-subscription',
-                  schema=AvroSchema(Example) )
-
-while True:
-    msg = consumer.receive()
-    ex = msg.value()
-    try:
-        print("Received message a={} b={} c={}".format(ex.a, ex.b, ex.c))
-        # Acknowledge successful processing of the message
-        consumer.acknowledge(msg)
-    except:
-        # Message failed to be processed
-        consumer.negative_acknowledge(msg)
-
-```
-
 ### Supported schema types
 
 You can use different builtin schema types in Pulsar. All the definitions are in the `pulsar.schema` package.
@@ -338,6 +279,245 @@ class Example(Record):
 
 ```
 
+##### Set namespace for Avro schema
+
+Set the namespace for Avro Record schema using the special field `_avro_namespace`.
+
+```python
+
+class NamespaceDemo(Record):
+   _avro_namespace = 'xxx.xxx.xxx'
+   x = String()
+   y = Integer()
+
+```
+
+The schema definition is as follows.
+
+```
+
+{
+  'name': 'NamespaceDemo', 'namespace': 'xxx.xxx.xxx', 'type': 'record', 'fields': [
+    {'name': 'x', 'type': ['null', 'string']}, 
+    {'name': 'y', 'type': ['null', 'int']}
+  ]
+}
+
+```
+
+### Declare and validate schema
+
+You can send messages using `BytesSchema`, `StringSchema`, `AvroSchema`, and `JsonSchema`.
+
+Before the producer is created, the Pulsar broker validates that the existing topic schema is the correct type and that the format is compatible with the schema definition of a class. If the format of the topic schema is incompatible with the schema definition, an exception occurs in the producer creation.
+
+Once a producer is created with a certain schema definition, it only accepts objects that are instances of the declared schema class.
+
+Similarly, for a consumer or reader, the consumer returns an object (which is an instance of the schema record class) rather than raw bytes.
+
+**Example**
+
+```python
+
+consumer = client.subscribe(
+                  topic='my-topic',
+                  subscription_name='my-subscription',
+                  schema=AvroSchema(Example) )
+
+while True:
+    msg = consumer.receive()
+    ex = msg.value()
+    try:
+        print("Received message a={} b={} c={}".format(ex.a, ex.b, ex.c))
+        # Acknowledge successful processing of the message
+        consumer.acknowledge(msg)
+    except:
+        # Message failed to be processed
+        consumer.negative_acknowledge(msg)
+
+```
+
+<Tabs 
+  defaultValue="BytesSchema"
+  values={[{"label":"BytesSchema","value":"BytesSchema"},{"label":"StringSchema","value":"StringSchema"},{"label":"AvroSchema","value":"AvroSchema"},{"label":"JsonSchema","value":"JsonSchema"}]}>
+
+<TabItem value="BytesSchema">
+
+You can send byte data using a `BytesSchema`.
+
+**Example**
+
+```python
+
+producer = client.create_producer(
+                'bytes-schema-topic',
+                schema=BytesSchema())
+producer.send(b"Hello")
+
+consumer = client.subscribe(
+				'bytes-schema-topic',
+				'sub',
+				schema=BytesSchema())
+msg = consumer.receive()
+data = msg.value()
+
+```
+
+</TabItem>
+<TabItem value="StringSchema">
+
+You can send string data using a `StringSchema`.
+
+**Example**
+
+```python
+
+producer = client.create_producer(
+                'string-schema-topic',
+                schema=StringSchema())
+producer.send("Hello")
+
+consumer = client.subscribe(
+				'string-schema-topic',
+				'sub',
+				schema=StringSchema())
+msg = consumer.receive()
+str = msg.value()
+
+```
+
+</TabItem>
+<TabItem value="AvroSchema">
+
+You can declare an `AvroSchema` using one of the following methods.
+
+#### Method 1: Record
+
+You can declare an `AvroSchema` by passing a class that inherits
+from `pulsar.schema.Record` and defines the fields as
+class variables. 
+
+**Example**
+
+```python
+
+class Example(Record):
+    a = Integer()
+    b = Integer()
+
+producer = client.create_producer(
+                'avro-schema-topic',
+                schema=AvroSchema(Example))
+r = Example(a=1, b=2)
+producer.send(r)
+
+consumer = client.subscribe(
+				'avro-schema-topic',
+				'sub',
+				schema=AvroSchema(Example))
+msg = consumer.receive()
+e = msg.value()
+
+```
+
+#### Method 2: JSON definition
+
+You can declare an `AvroSchema` using JSON. In this case, Avro schemas are defined using JSON.
+
+**Example**
+
+Below is an `AvroSchema` defined using a JSON file (_company.avsc_). 
+
+```json
+
+{
+    "doc": "this is doc",
+    "namespace": "example.avro",
+    "type": "record",
+    "name": "Company",
+    "fields": [
+        {"name": "name", "type": ["null", "string"]},
+        {"name": "address", "type": ["null", "string"]},
+        {"name": "employees", "type": ["null", {"type": "array", "items": {
+            "type": "record",
+            "name": "Employee",
+            "fields": [
+                {"name": "name", "type": ["null", "string"]},
+                {"name": "age", "type": ["null", "int"]}
+            ]
+        }}]},
+        {"name": "labels", "type": ["null", {"type": "map", "values": "string"}]}
+    ]
+}
+
+```
+
+You can load a schema definition from a file by using [`avro.schema`](http://avro.apache.org/docs/current/gettingstartedpython.html) or [`fastavro.schema`](https://fastavro.readthedocs.io/en/latest/schema.html#fastavro._schema_py.load_schema).
+
+If you use the "JSON definition" method to declare an `AvroSchema`, pay attention to the following points:
+
+- You need to use [Python dict](https://developers.google.com/edu/python/dict-files) to produce and consume messages, which is different from using the "Record" method.
+
+- When generating an `AvroSchema` object, set `_record_cls` parameter to `None`.
+
+**Example**
+
+```
+
+from fastavro.schema import load_schema
+from pulsar.schema import *
+schema_definition = load_schema("examples/company.avsc")
+avro_schema = AvroSchema(None, schema_definition=schema_definition)
+producer = client.create_producer(
+    topic=topic,
+    schema=avro_schema)
+consumer = client.subscribe(topic, 'test', schema=avro_schema)
+company = {
+    "name": "company-name" + str(i),
+    "address": 'xxx road xxx street ' + str(i),
+    "employees": [
+        {"name": "user" + str(i), "age": 20 + i},
+        {"name": "user" + str(i), "age": 30 + i},
+        {"name": "user" + str(i), "age": 35 + i},
+    ],
+    "labels": {
+        "industry": "software" + str(i),
+        "scale": ">100",
+        "funds": "1000000.0"
+    }
+}
+producer.send(company)
+msg = consumer.receive()
+# Users could get a dict object by `value()` method.
+msg.value()
+
+```
+
+</TabItem>
+<TabItem value="JsonSchema">
+
+#### Record
+
+You can declare a `JsonSchema` by passing a class that inherits
+from `pulsar.schema.Record` and defines the fields as class variables. This is similar to using `AvroSchema`. The only difference is to use `JsonSchema` instead of `AvroSchema` when defining the schema type, as shown below. For how to use `AvroSchema` via record, see [here](client-libraries-python.md#method-1-record).
+
+```
+
+producer = client.create_producer(
+                'avro-schema-topic',
+                schema=JsonSchema(Example))
+
+consumer = client.subscribe(
+				'avro-schema-topic',
+				'sub',
+				schema=JsonSchema(Example))
+
+```
+
+</TabItem>
+
+</Tabs>
+
 ## End-to-end encryption
 
 [End-to-end encryption](https://pulsar.apache.org/docs/en/next/cookbooks-encryption/#docsNav) allows applications to encrypt messages at producers and decrypt messages at consumers.
@@ -450,7 +630,7 @@ This section provides step-by-step instructions on how to use the end-to-end enc
 
    ```
    
-   Received msg 'b'encryption message'' id = '(0,0,-1,-1)'
+   Received msg 'encryption message' id = '(0,0,-1,-1)'
    
    ```
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/client-libraries.md b/site2/website-next/versioned_docs/version-2.2.1/client-libraries.md
index 23e5a06..ab5b7c4 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/client-libraries.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/client-libraries.md
@@ -2,7 +2,6 @@
 id: client-libraries
 title: Pulsar client libraries
 sidebar_label: "Overview"
-original_id: client-libraries
 ---
 
 Pulsar supports the following client libraries:
@@ -16,7 +15,7 @@ Pulsar supports the following client libraries:
 - [C# client](client-libraries-dotnet)
 
 ## Feature matrix
-Pulsar client feature matrix for different languages is listed on [Client Features Matrix](https://github.com/apache/pulsar/wiki/Client-Features-Matrix) page.
+Pulsar client feature matrix for different languages is listed on [Pulsar Feature Matrix (Client and Function)](https://github.com/apache/pulsar/wiki/PIP-108%3A-Pulsar-Feature-Matrix-%28Client-and-Function%29) page.
 
 ## Third-party clients
 
@@ -33,3 +32,4 @@ Besides the official released clients, multiple projects on developing Pulsar cl
 | Scala | [pulsar4s](https://github.com/sksamuel/pulsar4s) | [sksamuel](https://github.com/sksamuel) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Idomatic, typesafe, and reactive Scala client for Apache Pulsar |
 | Rust | [pulsar-rs](https://github.com/wyyerd/pulsar-rs) | [Wyyerd Group](https://github.com/wyyerd) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Future-based Rust bindings for Apache Pulsar |
 | .NET | [pulsar-client-dotnet](https://github.com/fsharplang-ru/pulsar-client-dotnet) | [Lanayx](https://github.com/Lanayx) | [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native .NET client for C#/F#/VB |
+| Node.js | [pulsar-flex](https://github.com/ayeo-flex-org/pulsar-flex) | [Daniel Sinai](https://github.com/danielsinai), [Ron Farkash](https://github.com/ronfarkash), [Gal Rosenberg](https://github.com/galrose)| [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native Nodejs client |
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-architecture-overview.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-architecture-overview.md
index 6a501d2..8fe0717 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-architecture-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-architecture-overview.md
@@ -2,7 +2,6 @@
 id: concepts-architecture-overview
 title: Architecture Overview
 sidebar_label: "Architecture"
-original_id: concepts-architecture-overview
 ---
 
 At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication) data amongst themselves.
@@ -146,7 +145,7 @@ Some important things to know about the Pulsar proxy:
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL.
 
 You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-authentication.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-authentication.md
index b375ecb..335da8d 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-authentication.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-authentication.md
@@ -2,7 +2,6 @@
 id: concepts-authentication
 title: Authentication and Authorization
 sidebar_label: "Authentication and Authorization"
-original_id: concepts-authentication
 ---
 
 Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-messaging.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-messaging.md
index 70977e3..c9d3ea2 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-messaging.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-messaging.md
@@ -2,16 +2,17 @@
 id: concepts-messaging
 title: Messaging
 sidebar_label: "Messaging"
-original_id: concepts-messaging
 ---
 
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
 
-Pulsar is built on the [publish-subscribe](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) pattern (often abbreviated to pub-sub). In this pattern, [producers](#producers) publish messages to [topics](#topics). [Consumers](#consumers) [subscribe](#subscription-types) to those topics, process incoming messages, and send an acknowledgement when processing is complete.
+Pulsar is built on the [publish-subscribe](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) pattern (often abbreviated to pub-sub). In this pattern, [producers](#producers) publish messages to [topics](#topics); [consumers](#consumers) [subscribe](#subscription-types) to those topics, process incoming messages, and send [acknowledgements](#acknowledgement) to the broker when processing is finished.
 
-When a subscription is created, Pulsar [retains](concepts-architecture-overview.md#persistent-storage) all messages, even if the consumer is disconnected. Retained messages are discarded only when a consumer acknowledges that those messages are processed successfully.
+When a subscription is created, Pulsar [retains](concepts-architecture-overview.md#persistent-storage) all messages, even if the consumer is disconnected. The retained messages are discarded only when a consumer acknowledges that all these messages are processed successfully. 
+
+If the consumption of a message fails and you want this message to be consumed again, you can enable [message redelivery mechanism](#message-redelivery) to request the broker to resend this message.
 
 ## Messages
 
@@ -48,17 +49,17 @@ The default size of a message is 5 MB. You can configure the max size of a messa
   
   ```
 
-> For more information on Pulsar message contents, see Pulsar [binary protocol](developing-binary-protocol).
+> For more information on Pulsar messages, see Pulsar [binary protocol](developing-binary-protocol).
 
 ## Producers
 
-A producer is a process that attaches to a topic and publishes messages to a Pulsar [broker](reference-terminology.md#broker). The Pulsar broker process the messages.
+A producer is a process that attaches to a topic and publishes messages to a Pulsar [broker](reference-terminology.md#broker). The Pulsar broker processes the messages.
 
 ### Send modes
 
 Producers send messages to brokers synchronously (sync) or asynchronously (async).
 
-| Mode       | Description |            
+| Mode       | Description |
 |:-----------|-----------|
 | Sync send  | The producer waits for an acknowledgement from the broker after sending every message. If the acknowledgment is not received, the producer treats the sending operation as a failure.                                                                                                                                                                                    |
 | Async send | The producer puts a message in a blocking queue and returns immediately. The client library sends the message to the broker in the background. If the queue is full (you can [configure](reference-configuration.md#broker) the maximum size), the producer is blocked or fails immediately when calling the API, depending on arguments passed to the producer. |
@@ -75,12 +76,12 @@ You can have different types of access modes on topics for producers.
 
 :::note
 
-Once an application creates a producer with the `Exclusive` or `WaitForExclusive` access mode successfully, the instance of the application is guaranteed to be the **only one writer** on the topic. Other producers trying to produce on this topic get errors immediately or have to wait until they get the `Exclusive` access. 
+Once an application creates a producer with `Exclusive` or `WaitForExclusive` access mode successfully, the instance of this application is guaranteed to be the **only writer** to the topic. Any other producers trying to produce messages on this topic will either get errors immediately or have to wait until they get the `Exclusive` access. 
 For more information, see [PIP 68: Exclusive Producer](https://github.com/apache/pulsar/wiki/PIP-68:-Exclusive-Producer).
 
 :::
 
-You can set producer access mode through Java Client API. For more information, see `ProducerAccessMode` in [ProducerBuilder.java](https://github.com/apache/pulsar/blob/fc5768ca3bbf92815d142fe30e6bfad70a1b4fc6/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ProducerBuilder.java).
+You can set producer access mode through Java Client API. For more information, see `ProducerAccessMode` in [ProducerBuilder.java](https://github.com/apache/pulsar/blob/fc5768ca3bbf92815d142fe30e6bfad70a1b4fc6/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ProducerBuilder.java) file.
 
 
 ### Compression
@@ -98,17 +99,17 @@ When batching is enabled, the producer accumulates and sends a batch of messages
 
 In Pulsar, batches are tracked and stored as single units rather than as individual messages. Consumer unbundles a batch into individual messages. However, scheduled messages (configured through the `deliverAt` or the `deliverAfter` parameter) are always sent as individual messages even batching is enabled.
 
-In general, a batch is acknowledged when all of its messages are acknowledged by a consumer. It means unexpected failures, negative acknowledgements, or acknowledgement timeouts can result in redelivery of all messages in a batch, even if some of the messages are acknowledged.
+In general, a batch is acknowledged when all of its messages are acknowledged by a consumer. It means that when **not all** batch messages are acknowledged, then unexpected failures, negative acknowledgements, or acknowledgement timeouts can result in a redelivery of all messages in this batch.
 
-To avoid redelivering acknowledged messages in a batch to the consumer, Pulsar introduces batch index acknowledgement since Pulsar 2.6.0. When batch index acknowledgement is enabled, the consumer filters out the batch index that has been acknowledged and sends the batch index acknowledgement request to the broker. The broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer [...]
+To avoid redelivering acknowledged messages in a batch to the consumer, Pulsar introduces batch index acknowledgement since Pulsar 2.6.0. When batch index acknowledgement is enabled, the consumer filters out the batch index that has been acknowledged and sends the batch index acknowledgement request to the broker. The broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer [...]
 
 By default, batch index acknowledgement is disabled (`acknowledgmentAtBatchIndexLevelEnabled=false`). You can enable batch index acknowledgement by setting the `acknowledgmentAtBatchIndexLevelEnabled` parameter to `true` at the broker side. Enabling batch index acknowledgement results in more memory overheads. 
 
 ### Chunking
-When you enable chunking, read the following instructions.
+Before you enable chunking, read the following instructions.
 - Batching and chunking cannot be enabled simultaneously. To enable chunking, you must disable batching in advance.
 - Chunking is only supported for persisted topics.
-- Chunking is only supported for the exclusive and failover subscription types.
+- Chunking is only supported for Exclusive and Failover subscription types.
 
 When chunking is enabled (`chunkingEnabled=true`), if the message size is greater than the allowed maximum publish-payload size, the producer splits the original message into chunked messages and publishes them with chunked metadata to the broker separately and in order. At the broker side, the chunked messages are stored in the managed-ledger in the same way as that of ordinary messages. The only difference is that the consumer needs to buffer the chunked messages and combines them into [...]
 
@@ -149,71 +150,142 @@ Client libraries provide listener implementation for consumers. For example, the
 
 ### Acknowledgement
 
-When a consumer consumes a message successfully, the consumer sends an acknowledgement request to the broker. This message is permanently stored, and then deleted only after all the subscriptions have acknowledged it. If you want to store the message that has been acknowledged by a consumer, you need to configure the [message retention policy](concepts-messaging.md#message-retention-and-expiry).
+The consumer sends an acknowledgement request to the broker after it consumes a message successfully. Then, this consumed message will be permanently stored, and be deleted only after all the subscriptions have acknowledged it. If you want to store the messages that have been acknowledged by a consumer, you need to configure the [message retention policy](concepts-messaging.md#message-retention-and-expiry).
+
+For batch messages, you can enable batch index acknowledgement to avoid dispatching acknowledged messages to the consumer. For details about batch index acknowledgement, see [batching](#batching).
+
+Messages can be acknowledged in one of the following two ways:
+
+- Being acknowledged individually. With individual acknowledgement, the consumer acknowledges each message and sends an acknowledgement request to the broker.
+- Being acknowledged cumulatively. With cumulative acknowledgement, the consumer **only** acknowledges the last message it received. All messages in the stream up to (and including) the provided message are not redelivered to that consumer.
 
-For a batch message, if batch index acknowledgement is enabled, the broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer. When all indexes of the batch message are acknowledged, the batch message is deleted. For details about the batch index acknowledgement, see [batching](#batching).
+If you want to acknowledge messages individually, you can use the following API.
 
-Messages can be acknowledged in the following two ways:
+```java
+
+consumer.acknowledge(msg);
+
+```
 
-- Messages are acknowledged individually. With individual acknowledgement, the consumer needs to acknowledge each message and sends an acknowledgement request to the broker.
-- Messages are acknowledged cumulatively. With cumulative acknowledgement, the consumer only needs to acknowledge the last message it received. All messages in the stream up to (and including) the provided message are not re-delivered to that consumer.
+If you want to acknowledge messages cumulatively, you can use the following API.
+
+```java
+
+consumer.acknowledgeCumulative(msg);
+
+```
 
 :::note
 
-Cumulative acknowledgement cannot be used in [Shared subscription type](#subscription-types), because this subscription type involves multiple consumers which have access to the same subscription. In Shared subscription type, messages are acknowledged individually.
+Cumulative acknowledgement cannot be used in [Shared subscription type](#subscription-types), because Shared subscription type involves multiple consumers which have access to the same subscription. In Shared subscription type, messages are acknowledged individually.
 
 :::
 
 ### Negative acknowledgement
 
-When a consumer does not consume a message successfully at a time, and wants to consume the message again, the consumer sends a negative acknowledgement to the broker, and then the broker redelivers the message.
+The [negative acknowledgement](#negative-acknowledgement) mechanism allows you to send a notification to the broker indicating the consumer did not process a message.  When a consumer fails to consume a message and needs to re-consume it, the consumer sends a negative acknowledgement (nack) to the broker, triggering the broker to redeliver this message to the consumer.
+
+Messages are negatively acknowledged individually or cumulatively, depending on the consumption subscription type.
+
+In Exclusive and Failover subscription types, consumers only negatively acknowledge the last message they receive.
 
-Messages are negatively acknowledged either individually or cumulatively, depending on the consumption subscription type.
+In Shared and Key_Shared subscription types, consumers can negatively acknowledge messages individually.
 
-In the exclusive and failover subscription types, consumers only negatively acknowledge the last message they receive.
+Be aware that negative acknowledgements on ordered subscription types, such as Exclusive, Failover and Key_Shared, might cause failed messages to be sent to consumers out of the original order.
 
-In the shared and Key_Shared subscription types, you can negatively acknowledge messages individually.
+Use the following API to negatively acknowledge message consumption.
 
-Be aware that negative acknowledgment on ordered subscription types, such as Exclusive, Failover and Key_Shared, can cause failed messages to arrive consumers out of the original order.
+```java
+
+Consumer<byte[]> consumer = pulsarClient.newConsumer()
+                .topic(topic)
+                .subscriptionName("sub-negative-ack")
+                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
+                .negativeAckRedeliveryDelay(2, TimeUnit.SECONDS) // the default value is 1 min
+                .subscribe();
+
+Message<byte[]> message = consumer.receive();
+
+// call the API to send negative acknowledgement
+consumer.negativeAcknowledge(message);
+
+message = consumer.receive();
+consumer.acknowledge(message);
+
+```
 
 :::note
 
-If batching is enabled, other messages and the negatively acknowledged messages in the same batch are redelivered to the consumer.
+If batching is enabled, all messages in one batch are redelivered to the consumer.
 
 :::
 
+### Negative redelivery backoff
+
+Sometimes consumers fail to process messages successfully. In this case, you can use [negative acknowledgement](#negative-acknowledgement) to redeliver the messages after consumption failures. For the Shared subscription type, the messages are redelivered to other consumers; for other subscription types, the messages are redelivered to the same consumer.
+
+But this is not flexible enough. A better way is to use the **redelivery backoff mechanism**. You can redeliver messages with different delays by setting the number of times the messages are retried.
+
+Use the following API to enable `Negative Redelivery Backoff`.
+
+```java
+
+consumer.negativeAckRedeliveryBackoff(NegativeAckRedeliveryExponentialBackoff.builder()
+        .minNackTimeMs(1000)
+        .maxNackTimeMs(60 * 1000)
+        .build())
+
+```
+
 ### Acknowledgement timeout
 
-If a message is not consumed successfully, and you want to trigger the broker to redeliver the message automatically, you can adopt the unacknowledged message automatic re-delivery mechanism. Client tracks the unacknowledged messages within the entire `acktimeout` time range, and sends a `redeliver unacknowledged messages` request to the broker automatically when the acknowledgement timeout is specified.
+The acknowledgement timeout mechanism allows you to set a time range during which the client tracks the unacknowledged messages. After this acknowledgement timeout (`ackTimeout`) period, the client sends `redeliver unacknowledged messages` request to the broker, thus the broker resends the unacknowledged messages to the consumer.
+
+You can configure the acknowledgement timeout mechanism to redeliver the message if it is not acknowledged after `ackTimeout` or to execute a timer task to check the acknowledgement timeout messages during every `ackTimeoutTickTime` period.
 
 :::note
 
-If batching is enabled, other messages and the unacknowledged messages in the same batch are redelivered to the consumer.
+- If batching is enabled, all messages in one batch are redelivered to the consumer.  
+- Compared with acknowledgement timeout, negative acknowledgement is preferred. First, it is difficult to set a timeout value. Second, a broker resends messages when the message processing time exceeds the acknowledgement timeout, but these messages might not need to be re-consumed.
 
 :::
 
-:::note
+Use the following API to enable acknowledgement timeout.
 
-Prefer negative acknowledgements over acknowledgement timeout. Negative acknowledgement controls the re-delivery of individual messages with more precision, and avoids invalid redeliveries when the message processing time exceeds the acknowledgement timeout.
+```java
 
-:::
+Consumer<byte[]> consumer = pulsarClient.newConsumer()
+                .topic(topic)
+                .ackTimeout(2, TimeUnit.SECONDS) // the default value is 0
+                .ackTimeoutTickTime(1, TimeUnit.SECONDS)
+                .subscriptionName("sub")
+                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
+                .subscribe();
+
+Message<byte[]> message = consumer.receive();
+
+// wait at least 2 seconds
+message = consumer.receive();
+consumer.acknowledge(message);
+
+```
 
 ### Dead letter topic
 
-Dead letter topic enables you to consume new messages when some messages cannot be consumed successfully by a consumer. In this mechanism, messages that are failed to be consumed are stored in a separate topic, which is called dead letter topic. You can decide how to handle messages in the dead letter topic.
+Dead letter topic allows you to continue message consumption even if some messages are not consumed successfully. Messages that fail to be consumed are stored in a specific topic, which is called a dead letter topic. You can decide how to handle the messages in the dead letter topic.
 
-The following example shows how to enable dead letter topic in a Java client using the default dead letter topic:
+Enable dead letter topic in a Java client using the default dead letter topic.
 
 ```java
 
 Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
-              .topic(topic)
-              .subscriptionName("my-subscription")
-              .subscriptionType(SubscriptionType.Shared)
-              .deadLetterPolicy(DeadLetterPolicy.builder()
-                    .maxRedeliverCount(maxRedeliveryCount)
-                    .build())
-              .subscribe();
+                .topic(topic)
+                .subscriptionName("my-subscription")
+                .subscriptionType(SubscriptionType.Shared)
+                .deadLetterPolicy(DeadLetterPolicy.builder()
+                      .maxRedeliverCount(maxRedeliveryCount)
+                      .build())
+                .subscribe();
 
 ```
 
@@ -225,38 +297,37 @@ The default dead letter topic uses this format:
 
 ```
 
-  
-If you want to specify the name of the dead letter topic, use this Java client example:
+Use the Java client to specify the name of the dead letter topic.
 
 ```java
 
 Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
-              .topic(topic)
-              .subscriptionName("my-subscription")
-              .subscriptionType(SubscriptionType.Shared)
-              .deadLetterPolicy(DeadLetterPolicy.builder()
-                    .maxRedeliverCount(maxRedeliveryCount)
-                    .deadLetterTopic("your-topic-name")
-                    .build())
-              .subscribe();
+                .topic(topic)
+                .subscriptionName("my-subscription")
+                .subscriptionType(SubscriptionType.Shared)
+                .deadLetterPolicy(DeadLetterPolicy.builder()
+                      .maxRedeliverCount(maxRedeliveryCount)
+                      .deadLetterTopic("your-topic-name")
+                      .build())
+                .subscribe();
 
 ```
 
-Dead letter topic depends on message re-delivery. Messages are redelivered either due to [acknowledgement timeout](#acknowledgement-timeout) or [negative acknowledgement](#negative-acknowledgement). If you are going to use negative acknowledgement on a message, make sure it is negatively acknowledged before the acknowledgement timeout. 
+Dead letter topic depends on message redelivery. Messages are redelivered either due to [acknowledgement timeout](#acknowledgement-timeout) or [negative acknowledgement](#negative-acknowledgement). If you are going to use negative acknowledgement on a message, make sure it is negatively acknowledged before the acknowledgement timeout. 
 
 :::note
 
-Currently, dead letter topic is enabled In the shared and Key_Shared subscription types.
+Currently, dead letter topic is enabled in Shared and Key_Shared subscription types.
 
 :::
 
 ### Retry letter topic
 
-For many online business systems, a message is re-consumed due to exception occurs in the business logic processing. To configure the delay time for re-consuming the failed messages, you can configure the producer to send messages to both the business topic and the retry letter topic, and enable automatic retry on the consumer. When automatic retry is enabled on the consumer, a message is stored in the retry letter topic if the messages are not consumed, and therefore the consumer automa [...]
+For many online business systems, a message is re-consumed when exception occurs in the business logic processing. To configure the delay time for re-consuming the failed messages, you can configure the producer to send messages to both the business topic and the retry letter topic, and enable automatic retry on the consumer. With this setting, the messages that are not consumed will be stored in the retry letter topic. After the specified delay time, the consumer automatically consumes  [...]
 
 By default, automatic retry is disabled. You can set `enableRetry` to `true` to enable automatic retry on the consumer.
 
-This example shows how to consume messages from a retry letter topic.
+Use the following API to consume messages from a retry letter topic.
 
 ```java
 
@@ -275,6 +346,44 @@ Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
 
 ```
 
+The messages in the retry letter topic contain some special properties that are automatically created by the client.
+
+Special property | Description
+:--------------------|:-----------
+`REAL_TOPIC` | The real topic name.
+`ORIGIN_MESSAGE_ID` | The origin message ID. It is crucial for message tracking.
+`RECONSUMETIMES`   | The retry consume times.
+`DELAY_TIME`      | The delay time of the message, in milliseconds.
+
+**Example**
+
+```
+
+REAL_TOPIC = persistent://public/default/my-topic
+ORIGIN_MESSAGE_ID = 1:0:-1:0
+RECONSUMETIMES = 6
+DELAY_TIME = 3000
+
+```
+
+Use the following API to store the messages in a retry queue.
+
+```java
+
+consumer.reconsumeLater(msg, 3, TimeUnit.SECONDS);
+
+```
+
+Use the following API to add custom properties for the `reconsumeLater` function.
+
+```java
+
+Map<String, String> customProperties = new HashMap<String, String>();
+customProperties.put("custom-key-1", "custom-value-1");
+customProperties.put("custom-key-2", "custom-value-2");
+consumer.reconsumeLater(msg, customProperties, 3, TimeUnit.SECONDS);
+
+```
+
 ## Topics
 
 As in other pub-sub systems, topics in Pulsar are named channels for transmitting messages from producers to consumers. Topic names are URLs that have a well-defined structure:
@@ -292,7 +401,7 @@ Topic name component | Description
 `namespace`          | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespaces) level. Each tenant has one or multiple namespaces.
 `topic`              | The final part of the name. Topic names have no special meaning in a Pulsar instance.
 
-> **No need to explicitly create new topics**
+> **No need to explicitly create new topics**  
 > You do not need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar creates that topic under the namespace provided in the [topic name](#topics) automatically.
 > If no tenant or namespace is specified when a client creates a topic, the topic is created in the default tenant and namespace. You can also create a topic in a specified tenant and namespace, such as `persistent://my-tenant/my-namespace/my-topic`. `persistent://my-tenant/my-namespace/my-topic` means the `my-topic` topic is created in the `my-namespace` namespace of the `my-tenant` tenant.
 
@@ -306,18 +415,19 @@ A subscription is a named configuration rule that determines how messages are de
 
 ![Subscription types](/assets/pulsar-subscription-types.png)
 
-> **Pub-Sub or Queuing**
+> **Pub-Sub or Queuing**  
 > In Pulsar, you can use different subscriptions flexibly.
 > * If you want to achieve traditional "fan-out pub-sub messaging" among consumers, specify a unique subscription name for each consumer. It is exclusive subscription type.
 > * If you want to achieve "message queuing" among consumers, share the same subscription name among multiple consumers(shared, failover, key_shared).
 > * If you want to achieve both effects simultaneously, combine exclusive subscription type with other subscription types for consumers.
 
 ### Subscription types
+
 When a subscription has no consumers, its subscription type is undefined. The type of a subscription is defined when a consumer connects to it, and the type can be changed by restarting all consumers with a different configuration.
 
 #### Exclusive
 
-In *exclusive* type, only a single consumer is allowed to attach to the subscription. If multiple consumers subscribe to a topic using the same subscription, an error occurs.
+In *Exclusive* type, only a single consumer is allowed to attach to the subscription. If multiple consumers subscribe to a topic using the same subscription, an error occurs.
 
 In the diagram below, only **Consumer A-0** is allowed to consume messages.
 
@@ -339,11 +449,11 @@ In the diagram below, **Consumer-B-0** is the master consumer while **Consumer-B
 
 #### Shared
 
-In *shared* or *round robin* mode, multiple consumers can attach to the same subscription. Messages are delivered in a round robin distribution across consumers, and any given message is delivered to only one consumer. When a consumer disconnects, all the messages that were sent to it and not acknowledged will be rescheduled for sending to the remaining consumers.
+In *shared* or *round robin* type, multiple consumers can attach to the same subscription. Messages are delivered in a round robin distribution across consumers, and any given message is delivered to only one consumer. When a consumer disconnects, all the messages that were sent to it and not acknowledged will be rescheduled for sending to the remaining consumers.
 
 In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscribe to the topic, but **Consumer-C-3** and others could as well.
 
-> **Limitations of Shared type**
+> **Limitations of Shared type**  
 > When using Shared type, be aware that:
 > * Message ordering is not guaranteed.
 > * You cannot use cumulative acknowledgment with Shared type.
@@ -352,17 +462,61 @@ In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscrib
 
 #### Key_Shared
 
-In *Key_Shared* mode, multiple consumers can attach to the same subscription. Messages are delivered in a distribution across consumers and message with same key or same ordering key are delivered to only one consumer. No matter how many times the message is re-delivered, it is delivered to the same consumer. When a consumer connected or disconnected will cause served consumer change for some key of message.
+In *Key_Shared* type, multiple consumers can attach to the same subscription. Messages are delivered in a distribution across consumers, and messages with the same key or same ordering key are delivered to only one consumer. No matter how many times a message is redelivered, it is delivered to the same consumer. When a consumer connects or disconnects, the served consumer changes for some keys of messages.
+
+![Key_Shared subscriptions](/assets/pulsar-key-shared-subscriptions.png)
+
+Note that when the consumers are using the Key_Shared subscription type, you need to **disable batching** or **use key-based batching** for the producers. There are two reasons why the key-based batching is necessary for Key_Shared subscription type:
+1. The broker dispatches messages according to the keys of the messages, but the default batching approach might fail to pack the messages with the same key to the same batch. 
+2. Since it is the consumers instead of the broker who dispatch the messages from the batches, the key of the first message in one batch is considered as the key of all messages in this batch, thereby leading to context errors. 
+
+The key-based batching aims at resolving the above-mentioned issues. This batching method ensures that the producers pack the messages with the same key to the same batch. The messages without a key are packed into one batch and this batch has no key. When the broker dispatches messages from this batch, it uses `NON_KEY` as the key. In addition, each consumer is associated with **only one** key and should receive **only one message batch** for the connected key. By default, you can limit [...]
+
+Below are examples of enabling the key-based batching under the Key_Shared subscription type, with `client` being the Pulsar client that you created.
+
+<Tabs 
+  defaultValue="Java"
+  values={[{"label":"Java","value":"Java"},{"label":"C++","value":"C++"},{"label":"Python","value":"Python"}]}>
+<TabItem value="Java">
+
+```
+
+Producer<byte[]> producer = client.newProducer()
+        .topic("my-topic")
+        .batcherBuilder(BatcherBuilder.KEY_BASED)
+        .create();
+
+```
 
-> **Limitations of Key_Shared type**
+</TabItem>
+<TabItem value="C++">
+
+```
+
+ProducerConfiguration producerConfig;
+producerConfig.setBatchingType(ProducerConfiguration::BatchingType::KeyBasedBatching);
+Producer producer;
+client.createProducer("my-topic", producerConfig, producer);
+
+```
+
+</TabItem>
+<TabItem value="Python">
+
+```
+
+producer = client.create_producer(topic='my-topic', batching_type=pulsar.BatchingType.KeyBased)
+
+```
+
+</TabItem>
+
+</Tabs>
+
+> **Limitations of Key_Shared type**  
 > When you use Key_Shared type, be aware that:
 > * You need to specify a key or orderingKey for messages.
 > * You cannot use cumulative acknowledgment with Key_Shared type.
-> * Your producers should disable batching or use a key-based batch builder.
-
-![Key_Shared subscriptions](/assets/pulsar-key-shared-subscriptions.png)
-
-**You can disable Key_Shared subscription in the `broker.config` file.**
 
 ### Subscription modes
 
@@ -371,6 +525,7 @@ In *Key_Shared* mode, multiple consumers can attach to the same subscription. Me
 The subscription mode indicates the cursor type. 
 
 - When a subscription is created, an associated cursor is created to record the last consumed position. 
+
 - When a consumer of the subscription restarts, it can continue consuming from the last message it consumes.
 
 Subscription mode | Description | Note
@@ -434,7 +589,7 @@ When a consumer subscribes to a Pulsar topic, by default it subscribes to one sp
 
 When subscribing to multiple topics, the Pulsar client automatically makes a call to the Pulsar API to discover the topics that match the regex pattern/list, and then subscribe to all of them. If any of the topics do not exist, the consumer auto-subscribes to them once the topics are created.
 
-> **No ordering guarantees across multiple topics**
+> **No ordering guarantees across multiple topics**  
 > When a producer sends messages to a single topic, all messages are guaranteed to be read from that topic in the same order. However, these guarantees do not hold across multiple topics. So when a producer sends message to multiple topics, the order in which messages are read from those topics is not guaranteed to be the same.
 
 The following are multi-topic subscription examples for Java.
@@ -480,7 +635,7 @@ The **Topic1** topic has five partitions (**P0** through **P4**) split across th
 
 Messages for this topic are broadcast to two consumers. The [routing mode](#routing-modes) determines each message should be published to which partition, while the [subscription type](#subscription-types) determines which messages go to which consumers.
 
-Decisions about routing and subscription types can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
+Decisions about routing and subscription modes can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
 
 There is no difference between partitioned topics and normal topics in terms of how subscription types work, as partitioning only determines what happens between when a message is published by a producer and processed and acknowledged by a consumer.
 
@@ -577,6 +732,22 @@ Producer<byte[]> producer = client.newProducer()
 
 ```
 
+## Message redelivery
+
+Apache Pulsar supports graceful failure handling and ensures critical data is not lost. Software will always have unexpected conditions and at times messages may not be delivered successfully. Therefore, it is important to have a built-in mechanism that handles failure, particularly in asynchronous messaging as highlighted in the following examples.
+
+- Consumers get disconnected from the database or the HTTP server. When this happens, the database is temporarily offline while the consumer is writing the data to it and the external HTTP server that the consumer calls is momentarily unavailable.
+- Consumers get disconnected from a broker due to consumer crashes, broken connections, etc. As a consequence, the unacknowledged messages are delivered to other available consumers.
+
+Apache Pulsar avoids these and other message delivery failures using at-least-once delivery semantics that ensure Pulsar processes a message at least once. 
+
+To use message redelivery, you need to enable this mechanism in the Apache Pulsar client so that the broker can resend the unacknowledged messages. You can activate the message redelivery mechanism in Apache Pulsar using one of the following three methods. 
+
+- [Negative Acknowledgment](#negative-acknowledgement)
+- [Acknowledgement Timeout](#acknowledgement-timeout)
+- [Retry letter topic](#retry-letter-topic)
+
+
 ## Message retention and expiry
 
 By default, Pulsar message brokers:
@@ -626,7 +797,7 @@ Message deduplication makes Pulsar an ideal messaging system to be used in conju
 > You can find more in-depth information in [this post](https://www.splunk.com/en_us/blog/it/exactly-once-is-not-exactly-the-same.html).
 
 ## Delayed message delivery
-Delayed message delivery enables you to consume a message later rather than immediately. In this mechanism, a message is stored in BookKeeper, `DelayedDeliveryTracker` maintains the time index(time -> messageId) in memory after published to a broker, and it is delivered to a consumer once the specific delayed time is passed.  
+Delayed message delivery enables you to consume a message later. In this mechanism, a message is stored in BookKeeper. The `DelayedDeliveryTracker` maintains the time index (time -> messageId) in memory after the message is published to a broker. This message will be delivered to a consumer once the specified delay is over.  
 
 Delayed message delivery only works in Shared subscription type. In Exclusive and Failover subscription types, the delayed message is dispatched immediately.
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-multi-tenancy.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-multi-tenancy.md
index be752cc..8a17e72 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-multi-tenancy.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-multi-tenancy.md
@@ -2,7 +2,6 @@
 id: concepts-multi-tenancy
 title: Multi Tenancy
 sidebar_label: "Multi Tenancy"
-original_id: concepts-multi-tenancy
 ---
 
 Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-replication.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-replication.md
index 6e23962..11677cc 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-replication.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-replication.md
@@ -2,7 +2,6 @@
 id: concepts-replication
 title: Geo Replication
 sidebar_label: "Geo Replication"
-original_id: concepts-replication
 ---
 
 Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo) in Pulsar enables you to do that.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-schema-registry.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-schema-registry.md
index d8f106a..d28c9e6 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-schema-registry.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-schema-registry.md
@@ -2,111 +2,4 @@
 id: concepts-schema-registry
 title: Schema Registry
 sidebar_label: "Schema Registry"
-original_id: concepts-schema-registry
 ---
-
-Type safety is extremely important in any application built around a message bus like Pulsar. Producers and consumers need some kind of mechanism for coordinating types at the topic level lest a wide variety of potential problems arise (for example serialization and deserialization issues). Applications typically adopt one of two basic approaches to type safety in messaging:
-
-1. A "client-side" approach in which message producers and consumers are responsible for not only serializing and deserializing messages (which consist of raw bytes) but also "knowing" which types are being transmitted via which topics. If a producer is sending temperature sensor data on the topic `topic-1`, consumers of that topic will run into trouble if they attempt to parse that data as, say, moisture sensor readings.
-2. A "server-side" approach in which producers and consumers inform the system which data types can be transmitted via the topic. With this approach, the messaging system enforces type safety and ensures that producers and consumers remain synced.
-
-Both approaches are available in Pulsar, and you're free to adopt one or the other or to mix and match on a per-topic basis.
-
-1. For the "client-side" approach, producers and consumers can send and receive messages consisting of raw byte arrays and leave all type safety enforcement to the application on an "out-of-band" basis.
-1. For the "server-side" approach, Pulsar has a built-in **schema registry** that enables clients to upload data schemas on a per-topic basis. Those schemas dictate which data types are recognized as valid for that topic.
-
-#### Note
->
-> Currently, the Pulsar schema registry is only available for the [Java client](client-libraries-java.md), [CGo client](client-libraries-go.md), [Python client](client-libraries-python.md), and [C++ client](client-libraries-cpp).
-
-## Basic architecture
-
-Schemas are automatically uploaded when you create a typed Producer with a Schema. Additionally, Schemas can be manually uploaded to, fetched from, and updated via Pulsar's {@inject: rest:REST:tag/schemas} API.
-
-> #### Other schema registry backends
-> Out of the box, Pulsar uses the [Apache BookKeeper](concepts-architecture-overview#persistent-storage) log storage system for schema storage. You can, however, use different backends if you wish. Documentation for custom schema storage logic is coming soon.
-
-## How schemas work
-
-Pulsar schemas are applied and enforced *at the topic level* (schemas cannot be applied at the namespace or tenant level). Producers and consumers upload schemas to Pulsar brokers.
-
-Pulsar schemas are fairly simple data structures that consist of:
-
-* A **name**. In Pulsar, a schema's name is the topic to which the schema is applied.
-* A **payload**, which is a binary representation of the schema
-* A schema [**type**](#supported-schema-formats)
-* User-defined **properties** as a string/string map. Usage of properties is wholly application specific. Possible properties might be the Git hash associated with a schema, an environment like `dev` or `prod`, etc.
-
-## Schema versions
-
-In order to illustrate how schema versioning works, let's walk through an example. Imagine that the Pulsar [Java client](client-libraries-java) created using the code below attempts to connect to Pulsar and begin sending messages:
-
-```java
-
-PulsarClient client = PulsarClient.builder()
-        .serviceUrl("pulsar://localhost:6650")
-        .build();
-
-Producer<SensorReading> producer = client.newProducer(JSONSchema.of(SensorReading.class))
-        .topic("sensor-data")
-        .sendTimeout(3, TimeUnit.SECONDS)
-        .create();
-
-```
-
-The table below lists the possible scenarios when this connection attempt occurs and what will happen in light of each scenario:
-
-Scenario | What happens
-:--------|:------------
-No schema exists for the topic | The producer is created using the given schema. The schema is transmitted to the broker and stored (since no existing schema is "compatible" with the `SensorReading` schema). Any consumer created using the same schema/topic can consume messages from the `sensor-data` topic.
-A schema already exists; the producer connects using the same schema that's already stored | The schema is transmitted to the Pulsar broker. The broker determines that the schema is compatible. The broker attempts to store the schema in [BookKeeper](concepts-architecture-overview.md#persistent-storage) but then determines that it's already stored, so it's then used to tag produced messages.
-A schema already exists; the producer connects using a new schema that is compatible | The producer transmits the schema to the broker. The broker determines that the schema is compatible and stores the new schema as the current version (with a new version number).
-
-> Schemas are versioned in succession. Schema storage happens in the broker that handles the associated topic so that version assignments can be made. Once a version is assigned/fetched to/for a schema, all subsequent messages produced by that producer are tagged with the appropriate version.
-
-
-## Supported schema formats
-
-The following formats are supported by the Pulsar schema registry:
-
-* None. If no schema is specified for a topic, producers and consumers will handle raw bytes.
-* `String` (used for UTF-8-encoded strings)
-* [JSON](https://www.json.org/)
-* [Protobuf](https://developers.google.com/protocol-buffers/)
-* [Avro](https://avro.apache.org/)
-
-For usage instructions, see the documentation for your preferred client library:
-
-* [Java](client-libraries-java.md#schemas)
-
-> Support for other schema formats will be added in future releases of Pulsar.
-
-The following example shows how to define an Avro schema using the `GenericSchemaBuilder`, generate a generic Avro schema using `GenericRecordBuilder`, and consume messages into `GenericRecord`.
-
-**Example** 
-
-1. Use the `RecordSchemaBuilder` to build a schema.
-
-   ```java
-   
-   RecordSchemaBuilder recordSchemaBuilder = SchemaBuilder.record("schemaName");
-   recordSchemaBuilder.field("intField").type(SchemaType.INT32);
-   SchemaInfo schemaInfo = recordSchemaBuilder.build(SchemaType.AVRO);
-
-   Producer<GenericRecord> producer = client.newProducer(Schema.generic(schemaInfo)).create();
-   
-   ```
-
-2. Use `RecordBuilder` to build the generic records.
-
-   ```java
-   
-   producer.newMessage().value(schema.newRecordBuilder()
-               .set("intField", 32)
-               .build()).send();
-   
-   ```
-
-## Managing Schemas
-
-You can use Pulsar admin tools to manage schemas for topics.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-tiered-storage.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-tiered-storage.md
index 0b45b0a..f0bbde6 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-tiered-storage.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-tiered-storage.md
@@ -2,7 +2,6 @@
 id: concepts-tiered-storage
 title: Tiered Storage
 sidebar_label: "Tiered Storage"
-original_id: concepts-tiered-storage
 ---
 
 Pulsar's segment oriented architecture allows for topic backlogs to grow very large, effectively without limit. However, this can become expensive over time.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/concepts-topic-compaction.md b/site2/website-next/versioned_docs/version-2.2.1/concepts-topic-compaction.md
index c85e703..3356298 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/concepts-topic-compaction.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/concepts-topic-compaction.md
@@ -2,7 +2,6 @@
 id: concepts-topic-compaction
 title: Topic Compaction
 sidebar_label: "Topic Compaction"
-original_id: concepts-topic-compaction
 ---
 
 Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-compaction.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-compaction.md
index 0a36233..f95f64c 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-compaction.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-compaction.md
@@ -2,7 +2,6 @@
 id: cookbooks-compaction
 title: Topic compaction
 sidebar_label: "Topic compaction"
-original_id: cookbooks-compaction
 ---
 
 Pulsar's [topic compaction](concepts-topic-compaction.md#compaction) feature enables you to create **compacted** topics in which older, "obscured" entries are pruned from the topic, allowing for faster reads through the topic's history (which messages are deemed obscured/outdated/irrelevant will depend on your use case).
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-deduplication.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-deduplication.md
index 1669afa..307fe03 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-deduplication.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-deduplication.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-deduplication
 title: Message deduplication
-sidebar_label: "Message deduplication"
-original_id: cookbooks-deduplication
+sidebar_label: "Message deduplication "
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-encryption.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-encryption.md
index f0d8fb8..fbd1c97 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-encryption.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-encryption
 title: Pulsar Encryption
-sidebar_label: "Encryption"
-original_id: cookbooks-encryption
+sidebar_label: "Encryption "
 ---
 
 Pulsar encryption allows applications to encrypt messages at the producer and decrypt at the consumer. Encryption is performed using the public/private key pair configured by the application. Encrypted messages can only be decrypted by consumers with a valid key.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-message-queue.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-message-queue.md
index eb43cbd..9b93a94 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-message-queue.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-message-queue.md
@@ -2,7 +2,6 @@
 id: cookbooks-message-queue
 title: Using Pulsar as a message queue
 sidebar_label: "Message queue"
-original_id: cookbooks-message-queue
 ---
 
 Message queues are essential components of many large-scale data architectures. If every single work object that passes through your system absolutely *must* be processed in spite of the slowness or downright failure of this or that system component, there's a good chance that you'll need a message queue to step in and ensure that unprocessed data is retained---with correct ordering---until the required actions are taken.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-non-persistent.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-non-persistent.md
index 391569a..d40c4fb 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-non-persistent.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-non-persistent.md
@@ -2,7 +2,6 @@
 id: cookbooks-non-persistent
 title: Non-persistent messaging
 sidebar_label: "Non-persistent messaging"
-original_id: cookbooks-non-persistent
 ---
 
 **Non-persistent topics** are Pulsar topics in which message data is *never* [persistently stored](concepts-architecture-overview.md#persistent-storage) and kept only in memory. This cookbook provides:
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-partitioned.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-partitioned.md
index 7882fb9..2589693 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-partitioned.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-partitioned.md
@@ -2,6 +2,5 @@
 id: cookbooks-partitioned
 title: Partitioned topics
 sidebar_label: "Partitioned Topics"
-original_id: cookbooks-partitioned
 ---
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-retention-expiry.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-retention-expiry.md
index b9353b5..738cf42 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-retention-expiry.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-retention-expiry.md
@@ -2,7 +2,6 @@
 id: cookbooks-retention-expiry
 title: Message retention and expiry
 sidebar_label: "Message retention and expiry"
-original_id: cookbooks-retention-expiry
 ---
 
 import Tabs from '@theme/Tabs';
@@ -36,7 +35,7 @@ By default, when a Pulsar message arrives at a broker, the message is stored unt
 
 Retention policies are useful when you use the Reader interface. The Reader interface does not use acknowledgements, and messages do not exist within backlogs. It is required to configure retention for Reader-only use cases.
 
-When you set a retention policy on topics in a namespace, you must set **both** a *size limit* and a *time limit*. You can refer to the following table to set retention policies in `pulsar-admin` and Java.
+When you set a retention policy on topics in a namespace, you must set **both** a *size limit* (via `defaultRetentionSizeInMB`) and a *time limit* (via `defaultRetentionTimeInMinutes`). You can refer to the following table to set retention policies in `pulsar-admin` and Java.
 
 |Time limit|Size limit| Message retention      |
 |----------|----------|------------------------|
@@ -152,7 +151,10 @@ admin.namespaces().setRetention(namespace, policies);
 
 You can fetch the retention policy for a namespace by specifying the namespace. The output will be a JSON object with two keys: `retentionTimeInMinutes` and `retentionSizeInMB`.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-retention`](reference-pulsar-admin.md#namespaces) subcommand and specify the namespace.
 
@@ -168,11 +170,13 @@ $ pulsar-admin namespaces get-retention my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/retention|operation/getRetention?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -180,15 +184,17 @@ admin.namespaces().getRetention(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Backlog quotas
 
 *Backlogs* are sets of unacknowledged messages for a topic that have been stored by bookies. Pulsar stores all unacknowledged messages in backlogs until they are processed and acknowledged.
 
-You can control the allowable size of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
+You can control the allowable size and/or time of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
 
-TODO: Expand on is this per backlog or per topic?
-
-* an allowable *size threshold* for each topic in the namespace
+* an allowable *size and/or time threshold* for each topic in the namespace
 * a *retention policy* that determines which action the [broker](reference-terminology.md#broker) takes if the threshold is exceeded.
 
 The following retention policies are available:
@@ -210,9 +216,12 @@ Backlog quotas are handled at the namespace level. They can be managed via:
 
 You can set a size and/or time threshold and backlog retention policy for all of the topics in a [namespace](reference-terminology.md#namespace) by specifying the namespace, a size limit and/or a time limit in second, and a policy by name.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, and a retention policy using the `-p`/`--policy` flag.
+Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, a time limit using the `-lt`/`--limitTime` flag, a retention policy using the `-p`/`--policy` flag, and a policy type using the `-t`/`--type` flag (default is destination_storage).
 
 ##### Example
 
@@ -220,16 +229,26 @@ Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand a
 
 $ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns \
   --limit 2G \
-  --limitTime 36000 \
   --policy producer_request_hold
 
 ```
 
-#### REST API
+```shell
+
+$ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns \
+--limitTime 3600 \
+--policy producer_request_hold \
+--type message_age
+
+```
+
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -240,11 +259,18 @@ admin.namespaces().setBacklogQuota(namespace, quota);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get backlog threshold and backlog retention policy
 
 You can see which size threshold and backlog retention policy has been applied to a namespace.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-backlog-quotas`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-backlog-quotas) subcommand and specify a namespace. Here's an example:
 
@@ -260,11 +286,13 @@ $ pulsar-admin namespaces get-backlog-quotas my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/backlogQuotaMap|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -273,11 +301,18 @@ Map<BacklogQuota.BacklogQuotaType,BacklogQuota> quotas =
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove backlog quotas
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace. Here's an example:
+Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace, and use `-t`/`--type` to specify the backlog type to remove (default is destination_storage). Here's an example:
 
 ```shell
 
@@ -285,11 +320,13 @@ $ pulsar-admin namespaces remove-backlog-quota my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/removeBacklogQuota?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -297,6 +334,10 @@ admin.namespaces().removeBacklogQuota(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Clear backlog
 
 #### pulsar-admin
@@ -319,7 +360,10 @@ By default, Pulsar stores all unacknowledged messages forever. This can lead to
 
 ### Set the TTL for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`set-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-set-message-ttl) subcommand and specify a namespace and a TTL (in seconds) using the `-ttl`/`--messageTTL` flag.
 
@@ -332,11 +376,13 @@ $ pulsar-admin namespaces set-message-ttl my-tenant/my-ns \
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/setNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -344,9 +390,16 @@ admin.namespaces().setNamespaceMessageTTL(namespace, ttlInSeconds);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-message-ttl) subcommand and specify a namespace.
 
@@ -359,11 +412,13 @@ $ pulsar-admin namespaces get-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/getNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -371,9 +426,16 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`remove-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-message-ttl) subcommand and specify a namespace.
 
@@ -385,11 +447,13 @@ $ pulsar-admin namespaces remove-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/removeNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -397,6 +461,10 @@ admin.namespaces().removeNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Delete messages from namespaces
 
 If you do not have any retention period and that you never have much of a backlog, the upper limit for retaining messages, which are acknowledged, equals to the Pulsar segment rollover period + entry log rollover period + (garbage collection interval * garbage collection ratios).
diff --git a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-tiered-storage.md b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-tiered-storage.md
index 8f6a7fb..f2ea50d 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/cookbooks-tiered-storage.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/cookbooks-tiered-storage.md
@@ -2,14 +2,15 @@
 id: cookbooks-tiered-storage
 title: Tiered Storage
 sidebar_label: "Tiered Storage"
-original_id: cookbooks-tiered-storage
 ---
 
 Pulsar's **Tiered Storage** feature allows older backlog data to be offloaded to long term storage, thereby freeing up space in BookKeeper and reducing storage costs. This cookbook walks you through using tiered storage in your Pulsar cluster.
 
-* Tiered storage uses [Apache jclouds](https://jclouds.apache.org) to support [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/)(GCS for short) for long term storage. With Jclouds, it is easy to add support for more [cloud storage providers](https://jclouds.apache.org/reference/providers/#blobstore-providers) in the future.
+* Tiered storage uses [Apache jclouds](https://jclouds.apache.org) to support [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/)(GCS for short)
+for long term storage. With Jclouds, it is easy to add support for more [cloud storage providers](https://jclouds.apache.org/reference/providers/#blobstore-providers) in the future.
 
-* Tiered storage uses [Apache Hadoop](http://hadoop.apache.org/) to support filesystem for long term storage. With Hadoop, it is easy to add support for more filesystem in the future.
+* Tiered storage uses [Apache Hadoop](http://hadoop.apache.org/) to support filesystem for long term storage. 
+With Hadoop, it is easy to add support for more filesystem in the future.
 
 ## When should I use Tiered Storage?
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/deploy-aws.md b/site2/website-next/versioned_docs/version-2.2.1/deploy-aws.md
index 6323051..2034749 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/deploy-aws.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/deploy-aws.md
@@ -2,7 +2,6 @@
 id: deploy-aws
 title: Deploying a Pulsar cluster on AWS using Terraform and Ansible
 sidebar_label: "Amazon Web Services"
-original_id: deploy-aws
 ---
 
 > For instructions on deploying a single Pulsar cluster manually rather than using Terraform and Ansible, see [Deploying a Pulsar cluster on bare metal](deploy-bare-metal.md). For instructions on manually deploying a multi-cluster Pulsar instance, see [Deploying a Pulsar instance on bare metal](deploy-bare-metal-multi-cluster).
@@ -148,7 +147,7 @@ Variable name | Description | Default
 When you run the Ansible playbook, the following AWS resources are used:
 
 * 9 total [Elastic Compute Cloud](https://aws.amazon.com/ec2) (EC2) instances running the [ami-9fa343e7](https://access.redhat.com/articles/3135091) Amazon Machine Image (AMI), which runs [Red Hat Enterprise Linux (RHEL) 7.4](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/7.4_release_notes/index). By default, that includes:
-  * 3 small VMs for ZooKeeper ([t2.small](https://www.ec2instances.info/?selected=t2.small) instances)
+  * 3 small VMs for ZooKeeper ([t3.small](https://www.ec2instances.info/?selected=t3.small) instances)
   * 3 larger VMs for BookKeeper [bookies](reference-terminology.md#bookie) ([i3.xlarge](https://www.ec2instances.info/?selected=i3.xlarge) instances)
   * 2 larger VMs for Pulsar [brokers](reference-terminology.md#broker) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
   * 1 larger VMs for Pulsar [proxy](reference-terminology.md#proxy) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
diff --git a/site2/website-next/versioned_docs/version-2.2.1/deploy-bare-metal-multi-cluster.md b/site2/website-next/versioned_docs/version-2.2.1/deploy-bare-metal-multi-cluster.md
index 643c122..9dd2526 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/deploy-bare-metal-multi-cluster.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/deploy-bare-metal-multi-cluster.md
@@ -2,38 +2,30 @@
 id: deploy-bare-metal-multi-cluster
 title: Deploying a multi-cluster on bare metal
 sidebar_label: "Bare metal multi-cluster"
-original_id: deploy-bare-metal-multi-cluster
 ---
 
 :::tip
 
-1. Single-cluster Pulsar installations should be sufficient for all but the most ambitious use cases. If you are interested in experimenting with
-Pulsar or using it in a startup or on a single team, you had better opt for a single cluster. For instructions on deploying a single cluster,
-see the guide [here](deploy-bare-metal).
-2. If you want to use all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you need to download `apache-pulsar-io-connectors`
-package and install `apache-pulsar-io-connectors` under `connectors` directory in the pulsar directory on every broker node or on every function-worker node if you
-run a separate cluster of function workers for [Pulsar Functions](functions-overview).
-3. If you want to use [Tiered Storage](concepts-tiered-storage) feature in your Pulsar deployment, you need to download `apache-pulsar-offloaders`
-package and install `apache-pulsar-offloaders` under `offloaders` directory in the pulsar directory on every broker node. For more details of how to configure
-this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
+1. You can use single-cluster Pulsar installation in most use cases, such as experimenting with Pulsar or using Pulsar in a startup or in a single team. If you only need to run a single-cluster Pulsar instance, see the [guide](deploy-bare-metal).
+2. If you want to use all built-in [Pulsar IO](io-overview.md) connectors, you need to download the `apache-pulsar-io-connectors` package and install `apache-pulsar-io-connectors` under the `connectors` directory in the pulsar directory on every broker node, or on every function-worker node if you have run a separate cluster of function workers for [Pulsar Functions](functions-overview).
+3. If you want to use the [Tiered Storage](concepts-tiered-storage.md) feature in your Pulsar deployment, you need to download the `apache-pulsar-offloaders` package and install `apache-pulsar-offloaders` under the `offloaders` directory in the Pulsar directory on every broker node. For more details of how to configure this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
 
 :::
 
-A Pulsar *instance* consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance involves the following basic steps:
+A Pulsar instance consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance consists of the following steps:
 
-* Deploying two separate [ZooKeeper](#deploy-zookeeper) quorums: a [local](#deploy-local-zookeeper) quorum for each cluster in the instance and a [configuration store](#configuration-store) quorum for instance-wide tasks
-* Initializing [cluster metadata](#cluster-metadata-initialization) for each cluster
-* Deploying a [BookKeeper cluster](#deploy-bookkeeper) of bookies in each Pulsar cluster
-* Deploying [brokers](#deploy-brokers) in each Pulsar cluster
+1. Deploying two separate ZooKeeper quorums: a local quorum for each cluster in the instance and a configuration store quorum for instance-wide tasks
+2. Initializing cluster metadata for each cluster
+3. Deploying a BookKeeper cluster of bookies in each Pulsar cluster
+4. Deploying brokers in each Pulsar cluster
 
-If you want to deploy a single Pulsar cluster, see [Clusters and Brokers](getting-started-standalone.md#start-the-cluster).
 
 > #### Run Pulsar locally or on Kubernetes?
-> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes on [Google Kubernetes Engine](deploy-kubernetes#pulsar [...]
+> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes, on Google Kubernetes Engine and on Amazon Web Services.
 
 ## System requirement
 
-Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. To use Pulsar, you need to install 64-bit JRE/JDK 8 or later versions.
+Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. You need to install 64-bit JRE/JDK 8 or later versions.
 
 :::note
 
@@ -68,8 +60,6 @@ $ cd apache-pulsar-@pulsar:version@
 
 ```
 
-## What your package contains
-
 The Pulsar binary package initially contains the following directories:
 
 Directory | Contains
@@ -93,17 +83,17 @@ Directory | Contains
 
 Each Pulsar instance relies on two separate ZooKeeper quorums.
 
-* [Local ZooKeeper](#deploy-local-zookeeper) operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs to have a dedicated ZooKeeper cluster.
-* [Configuration Store](#deploy-the-configuration-store) operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
+* Local ZooKeeper operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs a dedicated ZooKeeper cluster.
+* Configuration Store operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
 
-The configuration store quorum can be provided by an independent cluster of machines or by the same machines used by local ZooKeeper.
+You can use an independent cluster of machines or the same machines used by local ZooKeeper to provide the configuration store quorum.
 
 
 ### Deploy local ZooKeeper
 
 ZooKeeper manages a variety of essential coordination-related and configuration-related tasks for Pulsar.
 
-You need to stand up one local ZooKeeper cluster *per Pulsar cluster* for deploying a Pulsar instance. 
+You need to stand up one local ZooKeeper cluster per Pulsar cluster for deploying a Pulsar instance. 
 
 To begin, add all ZooKeeper servers to the quorum configuration specified in the [`conf/zookeeper.conf`](reference-configuration.md#zookeeper) file. Add a `server.N` line for each node in the cluster to the configuration, where `N` is the number of the ZooKeeper node. The following is an example for a three-node cluster:
 
@@ -117,7 +107,11 @@ server.3=zk3.us-west.example.com:2888:3888
 
 On each host, you need to specify the ID of the node in the `myid` file of each node, which is in `data/zookeeper` folder of each server by default (you can change the file location via the [`dataDir`](reference-configuration.md#zookeeper-dataDir) parameter).
 
-> See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+:::tip
+
+See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+
+:::
 
 On a ZooKeeper server at `zk1.us-west.example.com`, for example, you could set the `myid` value like this:
 
@@ -140,15 +134,15 @@ $ bin/pulsar-daemon start zookeeper
 
 ### Deploy the configuration store 
 
-The ZooKeeper cluster that is configured and started up in the section above is a *local* ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
+The ZooKeeper cluster configured and started up in the section above is a local ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
 
-If you deploy a [single-cluster](#single-cluster-pulsar-instance) instance, you do not need a separate cluster for the configuration store. If, however, you deploy a [multi-cluster](#multi-cluster-pulsar-instance) instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
+If you deploy a single-cluster instance, you do not need a separate cluster for the configuration store. If, however, you deploy a multi-cluster instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
 
 #### Single-cluster Pulsar instance
 
 If your Pulsar instance consists of just one cluster, then you can deploy a configuration store on the same machines as the local ZooKeeper quorum but run on different TCP ports.
 
-To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses to the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
+To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses to the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
 
 ```properties
 
@@ -165,13 +159,11 @@ As before, create the `myid` files for each server on `data/global-zookeeper/myi
 
 When you deploy a global Pulsar instance, with clusters distributed across different geographical regions, the configuration store serves as a highly available and strongly consistent metadata store that can tolerate failures and partitions spanning whole regions.
 
-The key here is to make sure the ZK quorum members are spread across at least 3 regions and that other regions run as observers.
+The key here is to make sure the ZK quorum members are spread across at least 3 regions, and other regions run as observers.
 
-Again, given the very low expected load on the configuration store servers, you can
-share the same hosts used for the local ZooKeeper quorum.
+Again, given the very low expected load on the configuration store servers, you can share the same hosts used for the local ZooKeeper quorum.
 
-For example, assume a Pulsar instance with the following clusters `us-west`,
-`us-east`, `us-central`, `eu-central`, `ap-south`. Also assume, each cluster has its own local ZK servers named such as the following: 
+For example, assume a Pulsar instance with the following clusters `us-west`, `us-east`, `us-central`, `eu-central`, `ap-south`. Also assume, each cluster has its own local ZK servers named such as the following: 
 
 ```
 
@@ -179,8 +171,7 @@ zk[1-3].${CLUSTER}.example.com
 
 ```
 
-In this scenario if you want to pick the quorum participants from few clusters and
-let all the others be ZK observers. For example, to form a 7 servers quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
+In this scenario, you want to pick the quorum participants from a few clusters and let all the others be ZK observers. For example, to form a 7-server quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
 
 This method guarantees that writes to configuration store is possible even if one of these regions is unreachable.
 
@@ -227,7 +218,7 @@ $ bin/pulsar-daemon start configuration-store
 
 ## Cluster metadata initialization
 
-Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **you only needs to write these metadata once**.
+Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **you only need to write these metadata once**.
 
 You can initialize this metadata using the [`initialize-cluster-metadata`](reference-cli-tools.md#pulsar-initialize-cluster-metadata) command of the [`pulsar`](reference-cli-tools.md#pulsar) CLI tool. The following is an example:
 
@@ -260,7 +251,7 @@ Make sure to run `initialize-cluster-metadata` for each cluster in your instance
 
 BookKeeper provides [persistent message storage](concepts-architecture-overview.md#persistent-storage) for Pulsar.
 
-Each Pulsar broker needs to have its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
+Each Pulsar broker needs its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
 
 ### Configure bookies
 
@@ -280,7 +271,7 @@ $ bin/pulsar-daemon start bookie
 
 You can verify that the bookie works properly using the `bookiesanity` command for the [BookKeeper shell](reference-cli-tools.md#bookkeeper-shell):
 
-```shell
+```bash
 
 $ bin/bookkeeper shell bookiesanity
 
@@ -304,7 +295,7 @@ Bookie hosts are responsible for storing message data on disk. In order for book
 Message entries written to bookies are always synced to disk before returning an acknowledgement to the Pulsar broker. To ensure low write latency, BookKeeper is
 designed to use multiple devices:
 
-* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID)s controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
+* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID) controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
 * A **ledger storage device** is where data is stored until all consumers acknowledge the message. Writes happen in the background, so write I/O is not a big concern. Reads happen sequentially most of the time and the backlog is drained only in case of consumer drain. To store large amounts of data, a typical configuration involves multiple HDDs with a RAID controller.
 
 
@@ -371,39 +362,13 @@ $ bin/pulsar broker
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions [immediately below](#service-discovery-setup).
+[Clients](getting-started-clients) connecting to Pulsar brokers need to communicate with an entire Pulsar instance using a single URL.
 
-You can also use your own service discovery system if you want. If you use your own system, you only need to satisfy just one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
+You can use your own service discovery system. If you use your own system, you only need to satisfy just one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to some active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
-> #### Service discovery already provided by many scheduling systems
+> **Service discovery already provided by many scheduling systems**
 > Many large-scale deployment systems, such as [Kubernetes](deploy-kubernetes), have service discovery systems built in. If you run Pulsar on such a system, you may not need to provide your own service discovery mechanism.
 
-
-### Service discovery setup
-
-The service discovery mechanism that included with Pulsar maintains a list of active brokers, which stored in ZooKeeper, and supports lookup using HTTP and also the [binary protocol](developing-binary-protocol) of Pulsar.
-
-To get started setting up the built-in service of discovery of Pulsar, you need to change a few parameters in the [`conf/discovery.conf`](reference-configuration.md#service-discovery) configuration file. Set the [`zookeeperServers`](reference-configuration.md#service-discovery-zookeeperServers) parameter to the ZooKeeper quorum connection string of the cluster and the [`configurationStoreServers`](reference-configuration.md#service-discovery-configurationStoreServers) setting to the [con [...]
-store](reference-terminology.md#configuration-store) quorum connection string.
-
-```properties
-
-# Zookeeper quorum connection string
-zookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
-
-# Global configuration store connection string
-configurationStoreServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
-
-```
-
-To start the discovery service:
-
-```shell
-
-$ bin/pulsar-daemon start discovery
-
-```
-
 ## Admin client and verification
 
 At this point your Pulsar instance should be ready to use. You can now configure client machines that can serve as [administrative clients](admin-api-overview) for each cluster. You can use the [`conf/client.conf`](reference-configuration.md#client) configuration file to configure admin clients.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/deploy-dcos.md b/site2/website-next/versioned_docs/version-2.2.1/deploy-dcos.md
index f5f8d1f..07f446e 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/deploy-dcos.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/deploy-dcos.md
@@ -7,18 +7,17 @@ original_id: deploy-dcos
 
 :::tip
 
-If you want to enable all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you can choose to use `apachepulsar/pulsar-all` image instead of
-`apachepulsar/pulsar` image. `apachepulsar/pulsar-all` image has already bundled [all builtin connectors](io-overview.md#working-with-connectors).
+To enable all built-in [Pulsar IO](io-overview) connectors in your Pulsar deployment, we recommend you use `apachepulsar/pulsar-all` image instead of `apachepulsar/pulsar` image; the former has already bundled [all built-in connectors](io-overview.md#working-with-connectors).
 
 :::
 
-[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system used for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool that [Mesosphere](https://mesosphere.com/) creates and maintains .
+[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool created and maintained by [Mesosphere](https://mesosphere.com/).
 
 Apache Pulsar is available as a [Marathon Application Group](https://mesosphere.github.io/marathon/docs/application-groups.html), which runs multiple applications as manageable sets.
 
 ## Prerequisites
 
-In order to run Pulsar on DC/OS, you need the following:
+You need to prepare your environment before running Pulsar on DC/OS.
 
 * DC/OS version [1.9](https://docs.mesosphere.com/1.9/) or higher
 * A [DC/OS cluster](https://docs.mesosphere.com/1.9/installing/) with at least three agent nodes
@@ -37,7 +36,7 @@ Each node in the DC/OS-managed Mesos cluster must have at least:
 * 4 GB of memory
 * 60 GB of total persistent disk
 
-Alternatively, you can change the configuration in `PulsarGroups.json` according to match your resources of DC/OS cluster.
+Alternatively, you can change the configuration in `PulsarGroups.json` accordingly to match the resources of your DC/OS cluster.
 
 ## Deploy Pulsar using the DC/OS command interface
 
@@ -56,9 +55,9 @@ This command deploys Docker container instances in three groups, which together
 * 1 [Prometheus](http://prometheus.io/) instance and 1 [Grafana](https://grafana.com/) instance
 
 
-> When you run DC/OS, a ZooKeeper cluster already runs at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
+> When you run DC/OS, a ZooKeeper cluster will be running at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
 
-After executing the `dcos` command above, click on the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications in the process of deploying.
+After executing the `dcos` command above, click the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications during the deployment.
 
 ![DC/OS command executed](/assets/dcos_command_execute.png)
 
@@ -66,15 +65,15 @@ After executing the `dcos` command above, click on the **Services** tab in the D
 
 ## The BookKeeper group
 
-To monitor the status of the BookKeeper cluster deployment, click on the **bookkeeper** group in the parent **pulsar** group.
+To monitor the status of the BookKeeper cluster deployment, click the **bookkeeper** group in the parent **pulsar** group.
 
 ![DC/OS bookkeeper status](/assets/dcos_bookkeeper_status.png)
 
-At this point, 3 [bookies](reference-terminology.md#bookie) should be shown as green, which means that the bookies have been deployed successfully and are now running.
+At this point, the status of the 3 [bookies](reference-terminology.md#bookie) is green, which means that the bookies have been deployed successfully and are running.
  
 ![DC/OS bookkeeper running](/assets/dcos_bookkeeper_run.png)
  
-You can also click into each bookie instance to get more detailed information, such as the bookie running log.
+You can also click each bookie instance to get more detailed information, such as the bookie running log.
 
 ![DC/OS bookie log](/assets/dcos_bookie_log.png)
 
@@ -82,23 +81,23 @@ To display information about the BookKeeper in ZooKeeper, you can visit [http://
 
 ![DC/OS bookkeeper in zk](/assets/dcos_bookkeeper_in_zookeeper.png)
 
-## The Pulsar broker Group
+## The Pulsar broker group
 
-Similar to the BookKeeper group above, click into the **brokers** to check the status of the Pulsar brokers.
+Similar to the BookKeeper group above, click **brokers** to check the status of the Pulsar brokers.
 
 ![DC/OS broker status](/assets/dcos_broker_status.png)
 
 ![DC/OS broker running](/assets/dcos_broker_run.png)
 
-You can also click into each broker instance to get more detailed information, such as the broker running log.
+You can also click each broker instance to get more detailed information, such as the broker running log.
 
 ![DC/OS broker log](/assets/dcos_broker_log.png)
 
-Broker cluster information in Zookeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
+Broker cluster information in ZooKeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
 
 ![DC/OS broker in zk](/assets/dcos_broker_in_zookeeper.png)
 
-## Monitor Group
+## Monitor group
 
 The **monitor** group consists of Prometheus and Grafana.
 
@@ -106,17 +105,17 @@ The **monitory** group consists of Prometheus and Grafana.
 
 ### Prometheus
 
-Click into the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
+Click the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
 
 ![DC/OS prom endpoint](/assets/dcos_prom_endpoint.png)
 
-If you click that endpoint, you can see the Prometheus dashboard. The [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets) URL display all the bookies and brokers.
+If you click that endpoint, you can see the Prometheus dashboard. All the bookies and brokers are listed on [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets).
 
 ![DC/OS prom targets](/assets/dcos_prom_targets.png)
 
 ### Grafana
 
-Click into `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
+Click `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
  
 ![DC/OS grafana endpoint](/assets/dcos_grafana_endpoint.png)
 
@@ -130,7 +129,7 @@ Now that you have a fully deployed Pulsar cluster, you can run a simple consumer
 
 ### Download and prepare the Pulsar Java tutorial
 
-You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file of the repo).
+You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file in this repo).
 
 ```bash
 
@@ -138,12 +137,13 @@ $ git clone https://github.com/streamlio/pulsar-java-tutorial
 
 ```
 
-Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java).
-The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent, which runs a broker. The client agent IP address can also replace this.
+Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) file and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file.
 
-Now, change the message number from 10 to 10000000 in main method of [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) so that it can produce more messages.
+The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent that runs a broker, and you can replace it with the client agent IP address.
 
-Now compile the project code using the command below:
+Now, you can change the message number from 10 to 10000000 in the main method in [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file to produce more messages.
+
+Then, you can compile the project code using the command below:
 
 ```bash
 
@@ -169,7 +169,7 @@ $ mvn exec:java -Dexec.mainClass="tutorial.ProducerTutorial"
 
 ```
 
-You can see the producer producing messages and the consumer consuming messages through the DC/OS GUI.
+You see that the producer is producing messages and the consumer is consuming messages through the DC/OS GUI.
 
 ![DC/OS pulsar producer](/assets/dcos_producer.png)
 
@@ -177,20 +177,20 @@ You can see the producer producing messages and the consumer consuming messages
 
 ### View Grafana metric output
 
-While the producer and consumer run, you can access running metrics information from Grafana.
+While the producer and consumer are running, you can access the running metrics from Grafana.
 
 ![DC/OS pulsar dashboard](/assets/dcos_metrics.png)
 
 
 ## Uninstall Pulsar
 
-You can shut down and uninstall the `pulsar` application from DC/OS at any time in the following two ways:
+You can shut down and uninstall the `pulsar` application from DC/OS at any time in one of the following two ways:
 
-1. Using the DC/OS GUI, you can choose **Delete** at the right end of Pulsar group.
+1. Click the three dots at the right end of Pulsar group and choose **Delete** on the DC/OS GUI.
 
    ![DC/OS pulsar uninstall](/assets/dcos_uninstall.png)
 
-2. You can use the following command:
+2. Use the command below.
 
    ```bash
    
diff --git a/site2/website-next/versioned_docs/version-2.2.1/deploy-kubernetes.md b/site2/website-next/versioned_docs/version-2.2.1/deploy-kubernetes.md
index dc7123d..4e170dc 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/deploy-kubernetes.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/deploy-kubernetes.md
@@ -2,7 +2,6 @@
 id: deploy-kubernetes
 title: Deploy Pulsar on Kubernetes
 sidebar_label: "Kubernetes"
-original_id: deploy-kubernetes
 ---
 
 To get up and running with these charts as fast as possible, in a **non-production** use case, we provide
diff --git a/site2/website-next/versioned_docs/version-2.2.1/deploy-monitoring.md b/site2/website-next/versioned_docs/version-2.2.1/deploy-monitoring.md
index 074ce3f..95ccdd6 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/deploy-monitoring.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/deploy-monitoring.md
@@ -2,7 +2,6 @@
 id: deploy-monitoring
 title: Monitor
 sidebar_label: "Monitor"
-original_id: deploy-monitoring
 ---
 
 You can use different ways to monitor a Pulsar cluster, exposing both metrics related to the usage of topics and the overall health of the individual components of the cluster.
@@ -127,17 +126,7 @@ The per-topic dashboard instructions are available at [Pulsar manager](administr
 
 You can use grafana to create dashboard driven by the data that is stored in Prometheus.
 
-When you deploy Pulsar on Kubernetes, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
-
-Enter the command below to use the dashboard manually:
-
-```shell
-
-docker run -p3000:3000 \
-        -e PROMETHEUS_URL=http://$PROMETHEUS_HOST:9090/ \
-        apachepulsar/pulsar-grafana:latest
-
-```
+When you deploy Pulsar on Kubernetes with the Pulsar Helm Chart, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
 
 The following are some Grafana dashboards examples:
 
@@ -145,4 +134,4 @@ The following are some Grafana dashboards examples:
 - [apache-pulsar-grafana-dashboard](https://github.com/streamnative/apache-pulsar-grafana-dashboard): a collection of Grafana dashboard templates for different Pulsar components running on both Kubernetes and on-premise machines.
 
 ## Alerting rules
-You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.1/develop-binary-protocol.md b/site2/website-next/versioned_docs/version-2.2.1/develop-binary-protocol.md
index b233f10..fa03383 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/develop-binary-protocol.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/develop-binary-protocol.md
@@ -2,7 +2,6 @@
 id: develop-binary-protocol
 title: Pulsar binary protocol specification
 sidebar_label: "Binary protocol"
-original_id: develop-binary-protocol
 ---
 
 Pulsar uses a custom binary protocol for communications between producers/consumers and brokers. This protocol is designed to support required features, such as acknowledgements and flow control, while ensuring maximum transport and implementation efficiency.
@@ -29,42 +28,63 @@ The Pulsar protocol allows for two types of commands:
 
 Simple (payload-free) commands have this basic structure:
 
-| Component   | Description                                                                             | Size (in bytes) |
-|:------------|:----------------------------------------------------------------------------------------|:----------------|
-| totalSize   | The size of the frame, counting everything that comes after it (in bytes)               | 4               |
-| commandSize | The size of the protobuf-serialized command                                             | 4               |
-| message     | The protobuf message serialized in a raw binary format (rather than in protobuf format) |                 |
+| Component     | Description                                                                             | Size (in bytes) |
+|:--------------|:----------------------------------------------------------------------------------------|:----------------|
+| `totalSize`   | The size of the frame, counting everything that comes after it (in bytes)               | 4               |
+| `commandSize` | The size of the protobuf-serialized command                                             | 4               |
+| `message`     | The protobuf message serialized in a raw binary format (rather than in protobuf format) |                 |
 
 ### Payload commands
 
 Payload commands have this basic structure:
 
-| Component    | Description                                                                                 | Size (in bytes) |
-|:-------------|:--------------------------------------------------------------------------------------------|:----------------|
-| totalSize    | The size of the frame, counting everything that comes after it (in bytes)                   | 4               |
-| commandSize  | The size of the protobuf-serialized command                                                 | 4               |
-| message      | The protobuf message serialized in a raw binary format (rather than in protobuf format)     |                 |
-| magicNumber  | A 2-byte byte array (`0x0e01`) identifying the current format                               | 2               |
-| checksum     | A [CRC32-C checksum](http://www.evanjones.ca/crc32c.html) of everything that comes after it | 4               |
-| metadataSize | The size of the message [metadata](#message-metadata)                                       | 4               |
-| metadata     | The message [metadata](#message-metadata) stored as a binary protobuf message               |                 |
-| payload      | Anything left in the frame is considered the payload and can include any sequence of bytes  |                 |
+| Component                          | Required or optional| Description                                                                                 | Size (in bytes) |
+|:-----------------------------------|:----------|:--------------------------------------------------------------------------------------------|:----------------|
+| `totalSize`                        | Required  | The size of the frame, counting everything that comes after it (in bytes)                   | 4               |
+| `commandSize`                      | Required  | The size of the protobuf-serialized command                                                 | 4               |
+| `message`                          | Required  | The protobuf message serialized in a raw binary format (rather than in protobuf format)     |                 |
+| `magicNumberOfBrokerEntryMetadata` | Optional  | A 2-byte byte array (`0x0e02`) identifying the broker entry metadata   <br /> **Note**: `magicNumberOfBrokerEntryMetadata` , `brokerEntryMetadataSize`, and `brokerEntryMetadata` should be used **together**.                     | 2               |
+| `brokerEntryMetadataSize`          | Optional  | The size of the broker entry metadata                                                       | 4               |
+| `brokerEntryMetadata`              | Optional  | The broker entry metadata stored as a binary protobuf message                               |                 |
+| `magicNumber`                      | Required  | A 2-byte byte array (`0x0e01`) identifying the current format                               | 2               |
+| `checksum`                         | Required  | A [CRC32-C checksum](http://www.evanjones.ca/crc32c.html) of everything that comes after it | 4               |
+| `metadataSize`                     | Required  | The size of the message [metadata](#message-metadata)                                       | 4               |
+| `metadata`                         | Required  | The message [metadata](#message-metadata) stored as a binary protobuf message               |                 |
+| `payload`                          | Required  | Anything left in the frame is considered the payload and can include any sequence of bytes  |                 |
+
+## Broker entry metadata
+
+Broker entry metadata is stored alongside the message metadata as a serialized protobuf message.
+It is created by the broker when the message arrives at the broker and is passed without changes to the consumer if configured.
+
+| Field              | Required or optional       | Description                                                                                                                   |
+|:-------------------|:----------------|:------------------------------------------------------------------------------------------------------------------------------|
+| `broker_timestamp` | Optional        | The timestamp when a message arrived at the broker (i.e. the number of milliseconds since January 1st, 1970 in UTC)      |
+| `index`            | Optional        | The index of the message. It is assigned by the broker.                                                                         |
+
+If you want to use broker entry metadata for **brokers**, configure the [`brokerEntryMetadataInterceptors`](reference-configuration.md#broker) parameter in the `broker.conf` file.
+
+If you want to use broker entry metadata for **consumers**:
+
+1. Use the client protocol version [18 or later](https://github.com/apache/pulsar/blob/ca37e67211feda4f7e0984e6414e707f1c1dfd07/pulsar-common/src/main/proto/PulsarApi.proto#L259).
+   
+2. Configure the [`brokerEntryMetadataInterceptors`](reference-configuration.md#broker) parameter and set the [`enableExposingBrokerEntryMetadataToClient`](reference-configuration.md#broker) parameter to `true` in the `broker.conf` file.
 
 ## Message metadata
 
-Message metadata is stored alongside the application-specified payload as a serialized protobuf message. Metadata is created by the producer and passed on unchanged to the consumer.
+Message metadata is stored alongside the application-specified payload as a serialized protobuf message. Metadata is created by the producer and passed without changes to the consumer.
 
-| Field                                | Description                                                                                                                                                                                                                                               |
-|:-------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `producer_name`                      | The name of the producer that published the message                                                                                                                                                                                         |
-| `sequence_id`                        | The sequence ID of the message, assigned by producer                                                                                                                                                                                        |
-| `publish_time`                       | The publish timestamp in Unix time (i.e. as the number of milliseconds since January 1st, 1970 in UTC)                                                                                                                                                    |
-| `properties`                         | A sequence of key/value pairs (using the [`KeyValue`](https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/proto/PulsarApi.proto#L32) message). These are application-defined keys and values with no special meaning to Pulsar. |
-| `replicated_from` *(optional)*       | Indicates that the message has been replicated and specifies the name of the [cluster](reference-terminology.md#cluster) where the message was originally published                                                                                                             |
-| `partition_key` *(optional)*         | While publishing on a partition topic, if the key is present, the hash of the key is used to determine which partition to choose                                                                                                                          |
-| `compression` *(optional)*           | Signals that payload has been compressed and with which compression library                                                                                                                                                                               |
-| `uncompressed_size` *(optional)*     | If compression is used, the producer must fill the uncompressed size field with the original payload size                                                                                                                                                 |
-| `num_messages_in_batch` *(optional)* | If this message is really a [batch](#batch-messages) of multiple entries, this field must be set to the number of messages in the batch                                                                                                                   |
+| Field                    | Required or optional | Description                                                                                                                                                                                                                                               |
+|:-------------------------|:----------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `producer_name`          | Required  | The name of the producer that published the message                                                                                                                                                                                         |
+| `sequence_id`            | Required  | The sequence ID of the message, assigned by producer                                                                                                                                                                                        |
+| `publish_time`           | Required  | The publish timestamp in Unix time (i.e. as the number of milliseconds since January 1st, 1970 in UTC)                                                                                                                                                    |
+| `properties`             | Required  | A sequence of key/value pairs (using the [`KeyValue`](https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/proto/PulsarApi.proto#L32) message). These are application-defined keys and values with no special meaning to Pulsar. |
+| `replicated_from`        | Optional  |  Indicates that the message has been replicated and specifies the name of the [cluster](reference-terminology.md#cluster) where the message was originally published                                                                                                             |
+| `partition_key`          | Optional  | While publishing on a partition topic, if the key is present, the hash of the key is used to determine which partition to choose. Partition key is used as the message key.                                                                                                                          |
+| `compression`            | Optional  | Signals that payload has been compressed and with which compression library                                                                                                                                                                               |
+| `uncompressed_size`      | Optional  | If compression is used, the producer must fill the uncompressed size field with the original payload size                                                                                                                                                 |
+| `num_messages_in_batch`  | Optional  | If this message is really a [batch](#batch-messages) of multiple entries, this field must be set to the number of messages in the batch                                                                                                                   |
 
 ### Batch messages
 
@@ -76,19 +96,19 @@ object.
 For a single batch, the payload format will look like this:
 
 
-| Field         | Description                                                 |
-|:--------------|:------------------------------------------------------------|
-| metadataSizeN | The size of the single message metadata serialized Protobuf |
-| metadataN     | Single message metadata                                     |
-| payloadN      | Message payload passed by application                       |
+| Field           | Required or optional | Description                                                |
+|:----------------|:---------------------|:-----------------------------------------------------------|
+| `metadataSizeN` | Required             | The size of the single message metadata serialized Protobuf |
+| `metadataN`     | Required             | Single message metadata                                     |
+| `payloadN`      | Required             | Message payload passed by application                       |
 
 Each metadata field looks like this:
 
-| Field                      | Description                                             |
-|:---------------------------|:--------------------------------------------------------|
-| properties                 | Application-defined properties                          |
-| partition key *(optional)* | Key to indicate the hashing to a particular partition   |
-| payload_size               | Size of the payload for the single message in the batch |
+| Field           | Required or optional  | Description                                             |
+|:----------------|:----------------------|:--------------------------------------------------------|
+| `properties`    | Required              | Application-defined properties                          |
+| `partition key` | Optional              | Key to indicate the hashing to a particular partition   |
+| `payload_size`  | Required              | Size of the payload for the single message in the batch |
 
 When compression is enabled, the whole batch will be compressed at once.
 
@@ -170,6 +190,10 @@ messages to the broker, referring to the producer id negotiated before.
 
 ![Producer interaction](/assets/binary-protocol-producer.png)
 
+If the client does not receive a response indicating producer creation success or failure,
+the client should first send a command to close the original producer before sending a
+command to re-attempt producer creation.
+
 ##### Command Producer
 
 ```protobuf
@@ -273,6 +297,11 @@ When receiving a `CloseProducer` command, the broker will stop accepting any
 more messages for the producer, wait until all pending messages are persisted
 and then reply `Success` to the client.
 
+If the client does not receive a response to a `Producer` command within a timeout,
+the client must first send a `CloseProducer` command before sending another
+`Producer` command. The client does not need to await a response to the `CloseProducer`
+command before sending the next `Producer` command.
+
 The broker can send a `CloseProducer` command to client when it's performing
 a graceful failover (eg: broker is being restarted, or the topic is being unloaded
 by load balancer to be transferred to a different broker).
diff --git a/site2/website-next/versioned_docs/version-2.2.1/develop-load-manager.md b/site2/website-next/versioned_docs/version-2.2.1/develop-load-manager.md
index 509209b..9687f30 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/develop-load-manager.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/develop-load-manager.md
@@ -2,7 +2,6 @@
 id: develop-load-manager
 title: Modular load manager
 sidebar_label: "Modular load manager"
-original_id: develop-load-manager
 ---
 
 The *modular load manager*, implemented in  [`ModularLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java), is a flexible alternative to the previously implemented load manager, [`SimpleLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java), which attempts to simplify how load  [...]
diff --git a/site2/website-next/versioned_docs/version-2.2.1/develop-tools.md b/site2/website-next/versioned_docs/version-2.2.1/develop-tools.md
index b545779..d034926 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/develop-tools.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/develop-tools.md
@@ -2,7 +2,6 @@
 id: develop-tools
 title: Simulation tools
 sidebar_label: "Simulation tools"
-original_id: develop-tools
 ---
 
 It is sometimes necessary to create a test environment and incur artificial load to observe how well load managers
diff --git a/site2/website-next/versioned_docs/version-2.2.1/functions-api.md b/site2/website-next/versioned_docs/version-2.2.1/functions-api.md
index ee4fe90..7cdbfd5 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/functions-api.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/functions-api.md
@@ -2,798 +2,4 @@
 id: functions-api
 title: The Pulsar Functions API
 sidebar_label: "API"
-original_id: functions-api
 ---
-
-[Pulsar Functions](functions-overview) provides an easy-to-use API that developers can use to create and manage processing logic for the Apache Pulsar messaging system. With Pulsar Functions, you can write functions of any level of complexity in [Java](#functions-for-java) or [Python](#functions-for-python) and run them in conjunction with a Pulsar cluster without needing to run a separate stream processing engine.
-
-> For a more in-depth overview of the Pulsar Functions feature, see the [Pulsar Functions overview](functions-overview).
-
-## Core programming model
-
-Pulsar Functions provide a wide range of functionality but are based on a very simple programming model. You can think of Pulsar Functions as lightweight processes that
-
-* consume messages from one or more Pulsar topics and then
-* apply some user-defined processing logic to each incoming message. That processing logic could be just about anything you want, including
-  * producing the resulting, processed message on another Pulsar topic, or
-  * doing something else with the message, such as writing results to an external database.
-
-You could use Pulsar Functions, for example, to set up the following processing chain:
-
-* A [Python](#functions-for-python) function listens on the `raw-sentences` topic and "[sanitizes](#example-function)" incoming strings (removing extraneous whitespace and converting all characters to lower case) and then publishes the results to a `sanitized-sentences` topic
-* A [Java](#functions-for-java) function listens on the `sanitized-sentences` topic, counts the number of times each word appears within a specified time window, and publishes the results to a `results` topic
-* Finally, a Python function listens on the `results` topic and writes the results to a MySQL table
-
-### Example function
-
-Here's an example "input sanitizer" function written in Python and stored in a `sanitizer.py` file:
-
-```python
-
-def clean_string(s):
-    return s.strip().lower()
-
-def process(input):
-    return clean_string(input)
-
-```
-
-Some things to note about this Pulsar Function:
-
-* There is no client, producer, or consumer object involved. All message "plumbing" is already taken care of for you, enabling you to worry only about processing logic.
-* No topics, subscription types, tenants, or namespaces are specified in the function logic itself. Instead, topics are specified upon [deployment](#example-deployment). This means that you can use and re-use Pulsar Functions across topics, tenants, and namespaces without needing to hard-code those attributes.
-
-### Example deployment
-
-Deploying Pulsar Functions is handled by the [`pulsar-admin`](reference-pulsar-admin) CLI tool, in particular the [`functions`](reference-pulsar-admin.md#functions) command. Here's an example command that would run our [sanitizer](#example-function) function from above in [local run](functions-deploying.md#local-run-mode) mode:
-
-```bash
-
-$ bin/pulsar-admin functions localrun \
-  --py sanitizer.py \          # The Python file with the function's code
-  --classname sanitizer \      # The class or function holding the processing logic
-  --tenant public \            # The function's tenant (derived from the topic name by default)
-  --namespace default \        # The function's namespace (derived from the topic name by default)
-  --name sanitizer-function \  # The name of the function (the class name by default)
-  --inputs dirty-strings-in \  # The input topic(s) for the function
-  --output clean-strings-out \ # The output topic for the function
-  --log-topic sanitizer-logs   # The topic to which all functions logs are published
-
-```
-
-For instructions on running functions in your Pulsar cluster, see the [Deploying Pulsar Functions](functions-deploying) guide.
-
-### Available APIs
-
-In both Java and Python, you have two options for writing Pulsar Functions:
-
-Interface | Description | Use cases
-:---------|:------------|:---------
-Language-native interface | No Pulsar-specific libraries or special dependencies required (only core libraries from Java/Python) | Functions that don't require access to the function's [context](#context)
-Pulsar Function SDK for Java/Python | Pulsar-specific libraries that provide a range of functionality not provided by "native" interfaces | Functions that require access to the function's [context](#context)
-
-In Python, for example, this language-native function, which adds an exclamation point to all incoming strings and publishes the resulting string to a topic, would have no external dependencies:
-
-```python
-
-def process(input):
-    return "{}!".format(input)
-
-```
-
-This function, however, would use the Pulsar Functions [SDK for Python](#python-sdk-functions):
-
-```python
-
-from pulsar import Function
-
-class DisplayFunctionName(Function):
-    def process(self, input, context):
-        function_name = context.function_name()
-        return "The function processing this message has the name {0}".format(function_name)
-
-```
-
-### Functions, Messages and Message Types
-
-Pulsar Functions can take byte arrays as inputs and spit out byte arrays as output. However in languages that support typed interfaces(just Java at the moment) one can write typed Functions as well. In this scenario, there are two ways one can bind messages to types.
-* [Schema Registry](#Schema-Registry)
-* [SerDe](#SerDe)
-
-### Schema Registry
-Pulsar has a built in [Schema Registry](concepts-schema-registry) and comes bundled with a variety of popular schema types(avro, json and protobuf). Pulsar Functions can leverage existing schema information from input topics to derive the input type. The same applies for output topic as well.
-
-### SerDe
-
-SerDe stands for **Ser**ialization and **De**serialization. All Pulsar Functions use SerDe for message handling. How SerDe works by default depends on the language you're using for a particular function:
-
-* In [Python](#python-serde), the default SerDe is identity, meaning that the type is serialized as whatever type the producer function returns
-* In [Java](#java-serde), a number of commonly used types (`String`s, `Integer`s, etc.) are supported by default
-
-In both languages, however, you can write your own custom SerDe logic for more complex, application-specific types. See the docs for [Java](#java-serde) and [Python](#python-serde) for language-specific instructions.
-
-### Context
-
-Both the [Java](#java-sdk-functions) and [Python](#python-sdk-functions) SDKs provide access to a **context object** that can be used by the function. This context object provides a wide variety of information and functionality to the function:
-
-* The name and ID of the Pulsar Function
-* The message ID of each message. Each Pulsar message is automatically assigned an ID.
-* The key, event time, properties and partition key of each message
-* The name of the topic on which the message was sent
-* The names of all input topics as well as the output topic associated with the function
-* The name of the class used for [SerDe](#serialization-and-deserialization-serde)
-* The [tenant](reference-terminology.md#tenant) and namespace associated with the function
-* The ID of the Pulsar Functions instance running the function
-* The version of the function
-* The [logger object](functions-overview.md#logging) used by the function, which can be used to create function log messages
-* Access to arbitrary [user config](#user-config) values supplied via the CLI
-* An interface for recording [metrics](functions-metrics)
-* An interface for storing and retrieving state in [state storage](functions-overview.md#state-storage)
-* A function to publish new messages onto arbitrary topics.
-* A function to acknowledge the message being processed (if auto-acknowledgement is disabled).
-
-### User config
-
-When you run or update Pulsar Functions created using the [SDK](#available-apis), you can pass arbitrary key/values to them via the command line with the `--userConfig` flag. Key/values must be specified as JSON. Here's an example of a function creation command that passes a user config key/value to a function:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --name word-filter \
-  # Other function configs
-  --user-config '{"forbidden-word":"rosebud"}'
-
-```
-
-If the function were a Python function, that config value could be accessed like this:
-
-```python
-
-from pulsar import Function
-
-class WordFilter(Function):
-    def process(self, context, input):
-        forbidden_word = context.user_config()["forbidden-word"]
-
-        # Don't publish the message if it contains the user-supplied
-        # forbidden word
-        if forbidden_word in input:
-            pass
-        # Otherwise publish the message
-        else:
-            return input
-
-```
-
-## Functions for Java
-
-Writing Pulsar Functions in Java involves implementing one of two interfaces:
-
-* The [`java.util.Function`](https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html) interface
-* The {@inject: javadoc:Function:/pulsar-functions/org/apache/pulsar/functions/api/Function} interface. This interface works much like the `java.util.Function` interface, but with the important difference that it provides a {@inject: javadoc:Context:/pulsar-functions/org/apache/pulsar/functions/api/Context} object that you can use in a [variety of ways](#context)
-
-### Get started
-
-In order to write Pulsar Functions in Java, you'll need to install the proper [dependencies](#dependencies) and package your function [as a JAR](#packaging).
-
-#### Dependencies
-
-How you get started writing Pulsar Functions in Java depends on which API you're using:
-
-* If you're writing a [Java native function](#java-native-functions), you won't need any external dependencies.
-* If you're writing a [Java SDK function](#java-sdk-functions), you'll need to import the `pulsar-functions-api` library.
-
-  Here's an example for a Maven `pom.xml` configuration file:
-
-  ```xml
-  
-  <dependency>
-    <groupId>org.apache.pulsar</groupId>
-    <artifactId>pulsar-functions-api</artifactId>
-    <version>2.1.1-incubating</version>
-  </dependency>
-  
-  ```
-
-  Here's an example for a Gradle `build.gradle` configuration file:
-
-  ```groovy
-  
-  dependencies {
-  compile group: 'org.apache.pulsar', name: 'pulsar-functions-api', version: '2.1.1-incubating'
-  }
-  
-  ```
-
-#### Packaging
-
-Whether you're writing Java Pulsar Functions using the [native](#java-native-functions) Java `java.util.Function` interface or using the [Java SDK](#java-sdk-functions), you'll need to package your function(s) as a "fat" JAR.
-
-> #### Starter repo
-> If you'd like to get up and running quickly, you can use [this repo](https://github.com/streamlio/pulsar-functions-java-starter), which contains the necessary Maven configuration to build a fat JAR as well as some example functions.
-
-### Java native functions
-
-If your function doesn't require access to its [context](#context), you can create a Pulsar Function by implementing the [`java.util.Function`](https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html) interface, which has this very simple, single-method signature:
-
-```java
-
-public interface Function<I, O> {
-    O apply(I input);
-}
-
-```
-
-Here's an example function that takes a string as its input, adds an exclamation point to the end of the string, and then publishes the resulting string:
-
-```java
-
-import java.util.Function;
-
-public class ExclamationFunction implements Function<String, String> {
-    @Override
-    public String process(String input) {
-        return String.format("%s!", input);
-    }
-}
-
-```
-
-In general, you should use native functions when you don't need access to the function's [context](#context). If you *do* need access to the function's context, then we recommend using the [Pulsar Functions Java SDK](#java-sdk-functions).
-
-#### Java native examples
-
-There is one example Java native function in this {@inject: github:folder:/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples}:
-
-* {@inject: github:JavaNativeExclamationFunction:/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/JavaNativeExclamationFunction.java}
-
-### Java SDK functions
-
-To get started developing Pulsar Functions using the Java SDK, you'll need to add a dependency on the `pulsar-functions-api` artifact to your project. Instructions can be found [above](#dependencies).
-
-> An easy way to get up and running with Pulsar Functions in Java is to clone the [`pulsar-functions-java-starter`](https://github.com/streamlio/pulsar-functions-java-starter) repo and follow the instructions there.
-
-
-#### Java SDK examples
-
-There are several example Java SDK functions in this {@inject: github:folder:/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples}:
-
-Function name | Description
-:-------------|:-----------
-[`ContextFunction`](https://github.com/apache/pulsar/blob/master/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/ContextFunction.java) | Illustrates [context](#context)-specific functionality like [logging](#java-logging) and [metrics](#java-metrics)
-[`WordCountFunction`](https://github.com/apache/pulsar/blob/master/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/WordCountFunction.java) | Illustrates usage of Pulsar Function [state-storage](functions-overview.md#state-storage)
-[`ExclamationFunction`](https://github.com/apache/pulsar/blob/master/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/ExclamationFunction.java) | A basic string manipulation function for the Java SDK [`LoggingFunction`](https://github.com/apache/pulsar/blob/master/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/LoggingFunction.java) | A function that shows how [logging](#java-logging) works for Java [`PublishFunct [...]
-[`UserConfigFunction`](https://github.com/apache/pulsar/blob/master/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/UserConfigFunction.java) | A function that consumes [user-supplied configuration](#java-user-config) values [`UserMetricFunction`](https://github.com/apache/pulsar/blob/master/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/UserMetricFunction.java) | A function that records metrics [`VoidFunction`]( [...]
-
-### Java context object
-
-The {@inject: javadoc:Context:/client/org/apache/pulsar/functions/api/Context} interface provides a number of methods that you can use to access the function's [context](#context). The various method signatures for the `Context` interface are listed below:
-
-```java
-
-public interface Context {
-    Record<?> getCurrentRecord();
-    Collection<String> getInputTopics();
-    String getOutputTopic();
-    String getOutputSchemaType();
-    String getTenant();
-    String getNamespace();
-    String getFunctionName();
-    String getFunctionId();
-    String getInstanceId();
-    String getFunctionVersion();
-    Logger getLogger();
-    void incrCounter(String key, long amount);
-    void incrCounterAsync(String key, long amount);
-    long getCounter(String key);
-    long getCounterAsync(String key);
-    void putState(String key, ByteBuffer value);
-    void putStateAsync(String key, ByteBuffer value);
-    ByteBuffer getState(String key);
-    ByteBuffer getStateAsync(String key);
-    Map<String, Object> getUserConfigMap();
-    Optional<Object> getUserConfigValue(String key);
-    Object getUserConfigValueOrDefault(String key, Object defaultValue);
-    void recordMetric(String metricName, double value);
-    <O> CompletableFuture<Void> publish(String topicName, O object, String schemaOrSerdeClassName);
-    <O> CompletableFuture<Void> publish(String topicName, O object);
-}
-
-```
-
-Here's an example function that uses several methods available via the `Context` object:
-
-```java
-
-import org.apache.pulsar.functions.api.Context;
-import org.apache.pulsar.functions.api.Function;
-import org.slf4j.Logger;
-
-import java.util.stream.Collectors;
-
-public class ContextFunction implements Function<String, Void> {
-    public Void process(String input, Context context) {
-        Logger LOG = context.getLogger();
-        String inputTopics = context.getInputTopics().stream().collect(Collectors.joining(", "));
-        String functionName = context.getFunctionName();
-
-        String logMessage = String.format("A message with a value of \"%s\" has arrived on one of the following topics: %s\n",
-                input,
-                inputTopics);
-
-        LOG.info(logMessage);
-
-        String metricName = String.format("function-%s-messages-received", functionName);
-        context.recordMetric(metricName, 1);
-
-        return null;
-    }
-}
-
-```
-
-### Void functions
-
-Pulsar Functions can publish results to an output topic, but this isn't required. You can also have functions that simply produce a log, write results to a database, etc. Here's a function that writes a simple log every time a message is received:
-
-```java
-
-import org.slf4j.Logger;
-
-public class LogFunction implements PulsarFunction<String, Void> {
-    public String apply(String input, Context context) {
-        Logger LOG = context.getLogger();
-        LOG.info("The following message was received: {}", input);
-        return null;
-    }
-}
-
-```
-
-> When using Java functions in which the output type is `Void`, the function must *always* return `null`.
-
-### Java SerDe
-
-Pulsar Functions use [SerDe](#serialization-and-deserialization-serde) when publishing data to and consuming data from Pulsar topics. When you're writing Pulsar Functions in Java, the following basic Java types are built in and supported by default:
-
-* `String`
-* `Double`
-* `Integer`
-* `Float`
-* `Long`
-* `Short`
-* `Byte`
-
-Built-in vs. custom. For custom, you need to implement this interface:
-
-```java
-
-public interface SerDe<T> {
-    T deserialize(byte[] input);
-    byte[] serialize(T input);
-}
-
-```
-
-#### Java SerDe example
-
-Imagine that you're writing Pulsar Functions in Java that are processing tweet objects. Here's a simple example `Tweet` class:
-
-```java
-
-public class Tweet {
-    private String username;
-    private String tweetContent;
-
-    public Tweet(String username, String tweetContent) {
-        this.username = username;
-        this.tweetContent = tweetContent;
-    }
-
-    // Standard setters and getters
-}
-
-```
-
-In order to be able to pass `Tweet` objects directly between Pulsar Functions, you'll need to provide a custom SerDe class. In the example below, `Tweet` objects are basically strings in which the username and tweet content are separated by a `|`.
-
-```java
-
-package com.example.serde;
-
-import org.apache.pulsar.functions.api.SerDe;
-
-import java.util.regex.Pattern;
-
-public class TweetSerde implements SerDe<Tweet> {
-    public Tweet deserialize(byte[] input) {
-        String s = new String(input);
-        String[] fields = s.split(Pattern.quote("|"));
-        return new Tweet(fields[0], fields[1]);
-    }
-
-    public byte[] serialize(Tweet input) {
-        return "%s|%s".format(input.getUsername(), input.getTweetContent()).getBytes();
-    }
-}
-
-```
-
-To apply this custom SerDe to a particular Pulsar Function, you would need to:
-
-* Package the `Tweet` and `TweetSerde` classes into a JAR
-* Specify a path to the JAR and SerDe class name when deploying the function
-
-Here's an example [`create`](reference-pulsar-admin.md#create-1) operation:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --jar /path/to/your.jar \
-  --output-serde-classname com.example.serde.TweetSerde \
-  # Other function attributes
-
-```
-
-> #### Custom SerDe classes must be packaged with your function JARs
-> Pulsar does not store your custom SerDe classes separately from your Pulsar Functions. That means that you'll need to always include your SerDe classes in your function JARs. If not, Pulsar will return an error.
-
-### Java logging
-
-Pulsar Functions that use the [Java SDK](#java-sdk-functions) have access to an [SLF4j](https://www.slf4j.org/) [`Logger`](https://www.slf4j.org/api/org/apache/log4j/Logger.html) object that can be used to produce logs at the chosen log level. Here's a simple example function that logs either a `WARNING`- or `INFO`-level log based on whether the incoming string contains the word `danger`:
-
-```java
-
-import org.apache.pulsar.functions.api.Context;
-import org.apache.pulsar.functions.api.Function;
-import org.slf4j.Logger;
-
-public class LoggingFunction implements Function<String, Void> {
-    @Override
-    public void apply(String input, Context context) {
-        Logger LOG = context.getLogger();
-        String messageId = new String(context.getMessageId());
-
-        if (input.contains("danger")) {
-            LOG.warn("A warning was received in message {}", messageId);
-        } else {
-            LOG.info("Message {} received\nContent: {}", messageId, input);
-        }
-
-        return null;
-    }
-}
-
-```
-
-If you want your function to produce logs, you need to specify a log topic when creating or running the function. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --jar my-functions.jar \
-  --classname my.package.LoggingFunction \
-  --log-topic persistent://public/default/logging-function-logs \
-  # Other function configs
-
-```
-
-Now, all logs produced by the `LoggingFunction` above can be accessed via the `persistent://public/default/logging-function-logs` topic.
-
-### Java user config
-
-The Java SDK's [`Context`](#context) object enables you to access key/value pairs provided to the Pulsar Function via the command line (as JSON). Here's an example function creation command that passes a key/value pair:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  # Other function configs
-  --user-config '{"word-of-the-day":"verdure"}'
-
-```
-
-To access that value in a Java function:
-
-```java
-
-import org.apache.pulsar.functions.api.Context;
-import org.apache.pulsar.functions.api.Function;
-import org.slf4j.Logger;
-
-import java.util.Optional;
-
-public class UserConfigFunction implements Function<String, Void> {
-    @Override
-    public void apply(String input, Context context) {
-        Logger LOG = context.getLogger();
-        Optional<String> wotd = context.getUserConfigValue("word-of-the-day");
-        if (wotd.isPresent()) {
-            LOG.info("The word of the day is {}", wotd);
-        } else {
-            LOG.warn("No word of the day provided");
-        }
-        return null;
-    }
-}
-
-```
-
-The `UserConfigFunction` function will log the string `"The word of the day is verdure"` every time the function is invoked (i.e. every time a message arrives). The `word-of-the-day` user config will be changed only when the function is updated with a new config value via the command line.
-
-You can also access the entire user config map or set a default value in case no value is present:
-
-```java
-
-// Get the whole config map
-Map<String, String> allConfigs = context.getUserConfigMap();
-
-// Get value or resort to default
-String wotd = context.getUserConfigValueOrDefault("word-of-the-day", "perspicacious");
-
-```
-
-> For all key/value pairs passed to Java Pulsar Functions, both the key *and* the value are `String`s. If you'd like the value to be of a different type, you will need to deserialize from the `String` type.
-
-### Java metrics
-
-You can record metrics using the [`Context`](#context) object on a per-key basis. You can, for example, set a metric for the key `process-count` and a different metric for the key `elevens-count` every time the function processes a message. Here's an example:
-
-```java
-
-import org.apache.pulsar.functions.api.Context;
-import org.apache.pulsar.functions.api.Function;
-
-public class MetricRecorderFunction implements Function<Integer, Void> {
-    @Override
-    public void apply(Integer input, Context context) {
-        // Records the metric 1 every time a message arrives
-        context.recordMetric("hit-count", 1);
-
-        // Records the metric only if the arriving number equals 11
-        if (input == 11) {
-            context.recordMetric("elevens-count", 1);
-        }
-
-        return null;
-    }
-}
-
-```
-
-> For instructions on reading and using metrics, see the [Monitoring](deploy-monitoring) guide.
-
-
-## Functions for Python
-
-Writing Pulsar Functions in Python entails implementing one of two things:
-
-* A `process` function that takes an input (message data from the function's input topic(s)), applies some kind of logic to it, and either returns an object (to be published to the function's output topic) or `pass`es and thus doesn't produce a message
-* A `Function` class that has a `process` method that provides a message input to process and a [context](#context) object
-
-### Get started
-
-Regardless of which [deployment mode](functions-deploying) you're using, 'pulsar-client' python library has to installed on any machine that's running Pulsar Functions written in Python.
-
-That could be your local machine for [local run mode](functions-deploying.md#local-run-mode) or a machine running a Pulsar [broker](reference-terminology.md#broker) for [cluster mode](functions-deploying.md#cluster-mode). To install those libraries using pip:
-
-```bash
-
-$ pip install pulsar-client
-
-```
-
-### Packaging
-
-At the moment, the code for Pulsar Functions written in Python must be contained within a single Python file. In the future, Pulsar Functions may support other packaging formats, such as [**P**ython **EX**ecutables](https://github.com/pantsbuild/pex) (PEXes).
-
-### Python native functions
-
-If your function doesn't require access to its [context](#context), you can create a Pulsar Function by implementing a `process` function, which provides a single input object that you can process however you wish. Here's an example function that takes a string as its input, adds an exclamation point at the end of the string, and then publishes the resulting string:
-
-```python
-
-def process(input):
-    return "{0}!".format(input)
-
-```
-
-In general, you should use native functions when you don't need access to the function's [context](#context). If you *do* need access to the function's context, then we recommend using the [Pulsar Functions Python SDK](#python-sdk-functions).
-
-#### Python native examples
-
-There is one example Python native function in this {@inject: github:folder:/pulsar-functions/python-examples}:
-
-* {@inject: github:`native_exclamation_function.py`:/pulsar-functions/python-examples/native_exclamation_function.py}
-
-### Python SDK functions
-
-To get started developing Pulsar Functions using the Python SDK, you'll need to install the [`pulsar-client`](/api/python) library using the instructions [above](#get-started).
-
-#### Python SDK examples
-
-There are several example Python functions in this {@inject: github:folder:/pulsar-functions/python-examples}:
-
-Function file | Description
-:-------------|:-----------
-[`exclamation_function.py`](https://github.com/apache/pulsar/blob/master/pulsar-functions/python-examples/exclamation_function.py) | Adds an exclamation point at the end of each incoming string
-[`logging_function.py`](https://github.com/apache/pulsar/blob/master/pulsar-functions/python-examples/logging_function.py) | Logs each incoming message
-[`thumbnailer.py`](https://github.com/apache/pulsar/blob/master/pulsar-functions/python-examples/thumbnailer.py) | Takes image data as input and outputs a 128x128 pixel thumbnail of each image
-
-#### Python context object
-
-The [`Context`](https://github.com/apache/pulsar/blob/master/pulsar-client-cpp/python/pulsar/functions/context.py) class provides a number of methods that you can use to access the function's [context](#context). The various methods for the `Context` class are listed below:
-
-Method | What it provides
-:------|:----------------
-`get_message_id` | The message ID of the message being processed
-`get_message_key` | The key of the message being processed
-`get_message_eventtime` | The event time of the message being processed
-`get_message_properties` | The properties of the message being processed
-`get_current_message_topic_name` | The topic of the message currently being processed
-`get_function_tenant` | The tenant under which the current Pulsar Function runs
-`get_function_namespace` | The namespace under which the current Pulsar Function runs
-`get_function_name` | The name of the current Pulsar Function
-`get_function_id` | The ID of the current Pulsar Function
-`get_instance_id` | The ID of the current Pulsar Functions instance
-`get_function_version` | The version of the current Pulsar Function
-`get_logger` | A logger object that can be used for [logging](#python-logging)
-`get_user_config_value` | Returns the value of a [user-defined config](#python-user-config) (or `None` if the config doesn't exist)
-`get_user_config_map` | Returns the entire user-defined config as a dict
-`get_secret` | The secret value associated with the name
-`get_partition_key` | The partition key of the input message
-`record_metric` | Records a per-key [metric](#python-metrics)
-`publish` | Publishes a message to the specified Pulsar topic
-`get_output_serde_class_name` | The name of the output [SerDe](#python-serde) class
-`ack` | [Acks](reference-terminology.md#acknowledgment-ack) the message being processed to Pulsar
-`incr_counter` | Increase the counter of a given key in the managed state
-`get_counter` | Get the counter of a given key in the managed state
-`del_counter` | Delete the counter of a given key in the managed state
-`put_state` | Update the value of a given key in the managed state
-`get_state` | Get the value of a given key in the managed state
-
-### Python SerDe
-
-Pulsar Functions use [SerDe](#serialization-and-deserialization-serde) when publishing data to and consuming data from Pulsar topics (this is true of both [native](#python-native-functions) functions and [SDK](#python-sdk-functions) functions). You can specify the SerDe when [creating](functions-deploying.md#cluster-mode) or [running](functions-deploying.md#local-run-mode) functions. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --tenant public \
-  --namespace default \
-  --name my_function \
-  --py my_function.py \
-  --classname my_function.MyFunction \
-  --custom-serde-inputs '{"input-topic-1":"Serde1","input-topic-2":"Serde2"}' \
-  --output-serde-classname Serde3 \
-  --output output-topic-1
-
-```
-
-In this case, there are two input topics, `input-topic-1` and `input-topic-2`, each of which is mapped to a different SerDe class (the map must be specified as a JSON string). The output topic, `output-topic-1`, uses the `Serde3` class for SerDe. At the moment, all Pulsar Function logic, including the processing function and SerDe classes, must be contained within a single Python file.
-
-When using Pulsar Functions for Python, you essentially have three SerDe options:
-
-1. You can use the [`IdentitySerDe`](https://github.com/apache/pulsar/blob/master/pulsar-client-cpp/python/pulsar/functions/serde.py#L70), which leaves the data unchanged. The `IdentitySerDe` is the **default**. Creating or running a function without explicitly specifying SerDe will mean that this option is used.
-2. You can use the [`PickleSerDe`](https://github.com/apache/pulsar/blob/master/pulsar-client-cpp/python/pulsar/functions/serde.py#L62), which uses Python's [`pickle`](https://docs.python.org/3/library/pickle.html) for SerDe.
-3. You can create a custom SerDe class by implementing the baseline [`SerDe`](https://github.com/apache/pulsar/blob/master/pulsar-client-cpp/python/pulsar/functions/serde.py#L50) class, which has just two methods: [`serialize`](https://github.com/apache/pulsar/blob/master/pulsar-client-cpp/python/pulsar/functions/serde.py#L53) for converting the object into bytes, and [`deserialize`](https://github.com/apache/pulsar/blob/master/pulsar-client-cpp/python/pulsar/functions/serde.py#L58) for  [...]
-
-The table below shows when you should use each SerDe:
-
-SerDe option | When to use
-:------------|:-----------
-`IdentitySerde` | When you're working with simple types like strings, Booleans, integers, and the like
-`PickleSerDe` | When you're working with complex, application-specific types and are comfortable with `pickle`'s "best effort" approach
-Custom SerDe | When you require explicit control over SerDe, potentially for performance or data compatibility purposes
-
-#### Python SerDe example
-
-Imagine that you're writing Pulsar Functions in Python that are processing tweet objects. Here's a simple `Tweet` class:
-
-```python
-
-class Tweet(object):
-    def __init__(self, username, tweet_content):
-        self.username = username
-        self.tweet_content = tweet_content
-
-```
-
-In order to use this class in Pulsar Functions, you'd have two options:
-
-1. You could specify `PickleSerDe`, which would apply the [`pickle`](https://docs.python.org/3/library/pickle.html) library's SerDe
-1. You could create your own SerDe class. Here's a simple example:
-
-  ```python
-  
-  from pulsar import SerDe
-
-  class TweetSerDe(SerDe):
-     def serialize(self, input):
-         return bytes("{0}|{1}".format(input.username, input.tweet_content))
-
-     def deserialize(self, input_bytes):
-         tweet_components = str(input_bytes).split('|')
-         return Tweet(tweet_components[0], tweet_components[1])
-  
-  ```
-
-### Python logging
-
-Pulsar Functions that use the [Python SDK](#python-sdk-functions) have access to a logging object that can be used to produce logs at the chosen log level. Here's a simple example function that logs either a `WARNING`- or `INFO`-level log based on whether the incoming string contains the word `danger`:
-
-```python
-
-from pulsar import Function
-
-class LoggingFunction(Function):
-    def process(self, input, context):
-        logger = context.get_logger()
-        msg_id = context.get_message_id()
-        if 'danger' in input:
-            logger.warn("A warning was received in message {0}".format(context.get_message_id()))
-        else:
-            logger.info("Message {0} received\nContent: {1}".format(msg_id, input))
-
-```
-
-If you want your function to produce logs on a Pulsar topic, you need to specify a **log topic** when creating or running the function. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --py logging_function.py \
-  --classname logging_function.LoggingFunction \
-  --log-topic logging-function-logs \
-  # Other function configs
-
-```
-
-Now, all logs produced by the `LoggingFunction` above can be accessed via the `logging-function-logs` topic.
-
-### Python user config
-
-The Python SDK's [`Context`](#context) object enables you to access key/value pairs provided to the Pulsar Function via the command line (as JSON). Here's an example function creation command that passes a key/value pair:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  # Other function configs \
-  --user-config '{"word-of-the-day":"verdure"}'
-
-```
-
-To access that value in a Python function:
-
-```python
-
-from pulsar import Function
-
-class UserConfigFunction(Function):
-    def process(self, input, context):
-        logger = context.get_logger()
-        wotd = context.get_user_config_value('word-of-the-day')
-        if wotd is None:
-            logger.warn('No word of the day provided')
-        else:
-            logger.info("The word of the day is {0}".format(wotd))
-
-```
-
-### Python metrics
-
-You can record metrics using the [`Context`](#context) object on a per-key basis. You can, for example, set a metric for the key `process-count` and a different metric for the key `elevens-count` every time the function processes a message. Here's an example:
-
-```python
-
-from pulsar import Function
-
-class MetricRecorderFunction(Function):
-    def process(self, input, context):
-        context.record_metric('hit-count', 1)
-
-        if input == 11:
-            context.record_metric('elevens-count', 1)
-
-```
-
diff --git a/site2/website-next/versioned_docs/version-2.2.1/functions-deploying.md b/site2/website-next/versioned_docs/version-2.2.1/functions-deploying.md
index 8ad8dbe..fabdbb7 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/functions-deploying.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/functions-deploying.md
@@ -2,260 +2,4 @@
 id: functions-deploying
 title: Deploying and managing Pulsar Functions
 sidebar_label: "Deploying functions"
-original_id: functions-deploying
 ---
-
-At the moment, there are two deployment modes available for Pulsar Functions:
-
-Mode | Description
-:----|:-----------
-Local run mode | The function runs in your local environment, for example on your laptop
-Cluster mode | The function runs *inside of* your Pulsar cluster, on the same machines as your Pulsar brokers
-
-> #### Contributing new deployment modes
-> The Pulsar Functions feature was designed, however, with extensibility in mind. Other deployment options will be available in the future. If you'd like to add a new deployment option, we recommend getting in touch with the Pulsar developer community at [dev@pulsar.apache.org](mailto:dev@pulsar.apache.org).
-
-## Requirements
-
-In order to deploy and manage Pulsar Functions, you need to have a Pulsar cluster running. There are several options for this:
-
-* You can run a [standalone cluster](getting-started-standalone) locally on your own machine
-* You can deploy a Pulsar cluster on [Kubernetes](deploy-kubernetes.md), [Amazon Web Services](deploy-aws.md), [bare metal](deploy-bare-metal.md), [DC/OS](deploy-dcos), and more
-
-If you're running a non-[standalone](reference-terminology.md#standalone) cluster, you'll need to obtain the service URL for the cluster. How you obtain the service URL will depend on how you deployed your Pulsar cluster.
-
-If you're going to deploy and trigger python user-defined functions, you should install [the pulsar python client](http://pulsar.apache.org/docs/en/client-libraries-python/) first.
-
-## Command-line interface
-
-Pulsar Functions are deployed and managed using the [`pulsar-admin functions`](reference-pulsar-admin.md#functions) interface, which contains commands such as [`create`](reference-pulsar-admin.md#functions-create) for deploying functions in [cluster mode](#cluster-mode), [`trigger`](reference-pulsar-admin.md#trigger) for [triggering](#triggering-pulsar-functions) functions, [`list`](reference-pulsar-admin.md#list-2) for listing deployed functions, and several others.
-
-### Fully Qualified Function Name (FQFN)
-
-Each Pulsar Function has a **Fully Qualified Function Name** (FQFN) that consists of three elements: the function's tenant, namespace, and function name. FQFN's look like this:
-
-```http
-
-tenant/namespace/name
-
-```
-
-FQFNs enable you to, for example, create multiple functions with the same name provided that they're in different namespaces.
-
-### Default arguments
-
-When managing Pulsar Functions, you'll need to specify a variety of information about those functions, including tenant, namespace, input and output topics, etc. There are some parameters, however, that have default values that will be supplied if omitted. The table below lists the defaults:
-
-Parameter | Default
-:---------|:-------
-Function name | Whichever value is specified for the class name (minus org, library, etc.). The flag `--classname org.example.MyFunction`, for example, would give the function a name of `MyFunction`.
-Tenant | Derived from the input topics' names. If the input topics are under the `marketing` tenant---i.e. the topic names have the form `persistent://marketing/{namespace}/{topicName}`---then the tenant will be `marketing`.
-Namespace | Derived from the input topics' names. If the input topics are under the `asia` namespace under the `marketing` tenant---i.e. the topic names have the form `persistent://marketing/asia/{topicName}`, then the namespace will be `asia`.
-Output topic | `{input topic}-{function name}-output`. A function with an input topic name of `incoming` and a function name of `exclamation`, for example, would have an output topic of `incoming-exclamation-output`.
-Subscription type | For at-least-once and at-most-once [processing guarantees](functions-guarantees), the [`SHARED`](concepts-messaging.md#shared) subscription type is applied by default; for effectively-once guarantees, [`FAILOVER`](concepts-messaging.md#failover) is applied
-Processing guarantees | [`ATLEAST_ONCE`](functions-guarantees)
-Pulsar service URL | `pulsar://localhost:6650`
-
-#### Example use of defaults
-
-Take this `create` command:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --jar my-pulsar-functions.jar \
-  --classname org.example.MyFunction \
-  --inputs my-function-input-topic1,my-function-input-topic2
-
-```
-
-The created function would have default values supplied for the function name (`MyFunction`), tenant (`public`), namespace (`default`), subscription type (`SHARED`), processing guarantees (`ATLEAST_ONCE`), and Pulsar service URL (`pulsar://localhost:6650`).
-
-## Local run mode
-
-If you run a Pulsar Function in **local run** mode, it will run on the machine from which the command is run (this could be your laptop, an [AWS EC2](https://aws.amazon.com/ec2/) instance, etc.). Here's an example [`localrun`](reference-pulsar-admin.md#localrun) command:
-
-```bash
-
-$ bin/pulsar-admin functions localrun \
-  --py myfunc.py \
-  --classname myfunc.SomeFunction \
-  --inputs persistent://public/default/input-1 \
-  --output persistent://public/default/output-1
-
-```
-
-By default, the function will connect to a Pulsar cluster running on the same machine, via a local [broker](reference-terminology.md#broker) service URL of `pulsar://localhost:6650`. If you'd like to use local run mode to run a function but connect it to a non-local Pulsar cluster, you can specify a different broker URL using the `--broker-service-url` flag. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions localrun \
-  --broker-service-url pulsar://my-cluster-host:6650 \
-  # Other function parameters
-
-```
-
-## Cluster mode
-
-When you run a Pulsar Function in **cluster mode**, the function code will be uploaded to a Pulsar broker and run *alongside the broker* rather than in your [local environment](#local-run-mode). You can run a function in cluster mode using the [`create`](reference-pulsar-admin.md#create-1) command. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --py myfunc.py \
-  --classname myfunc.SomeFunction \
-  --inputs persistent://public/default/input-1 \
-  --output persistent://public/default/output-1
-
-```
-
-### Updating cluster mode functions
-
-You can use the [`update`](reference-pulsar-admin.md#update-1) command to update a Pulsar Function running in cluster mode. This command, for example, would update the function created in the section [above](#cluster-mode):
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --py myfunc.py \
-  --classname myfunc.SomeFunction \
-  --inputs persistent://public/default/new-input-topic \
-  --output persistent://public/default/new-output-topic
-
-```
-
-### Parallelism
-
-Pulsar Functions run as processes called **instances**. When you run a Pulsar Function, it runs as a single instance by default (and in [local run mode](#local-run-mode) you can *only* run a single instance of a function).
-
-You can also specify the *parallelism* of a function, i.e. the number of instances to run, when you create the function. You can set the parallelism factor using the `--parallelism` flag of the [`create`](reference-pulsar-admin.md#functions-create) command. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --parallelism 3 \
-  # Other function info
-
-```
-
-You can adjust the parallelism of an already created function using the [`update`](reference-pulsar-admin.md#update-1) interface.
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --parallelism 5 \
-  # Other function
-
-```
-
-If you're specifying a function's configuration via YAML, use the `parallelism` parameter. Here's an example config file:
-
-```yaml
-
-# function-config.yaml
-parallelism: 3
-inputs:
-- persistent://public/default/input-1
-output: persistent://public/default/output-1
-# other parameters
-
-```
-
-And here's the corresponding update command:
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --function-config-file function-config.yaml
-
-```
-
-### Function instance resources
-
-When you run Pulsar Functions in [cluster run](#cluster-mode) mode, you can specify the resources that are assigned to each function [instance](#parallelism):
-
-Resource | Specified as... | Runtimes
-:--------|:----------------|:--------
-CPU | The number of cores | Docker (coming soon)
-RAM | The number of bytes | Process, Docker
-Disk space | The number of bytes | Docker
-
-Here's an example function creation command that allocates 8 cores, 8 GB of RAM, and 10 GB of disk space to a function:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --jar target/my-functions.jar \
-  --classname org.example.functions.MyFunction \
-  --cpu 8 \
-  --ram 8589934592 \
-  --disk 10737418240
-
-```
-
-> #### Resources are *per instance*
-> The resources that you apply to a given Pulsar Function are applied to each [instance](#parallelism) of the function. If you apply 8 GB of RAM to a function with a parallelism of 5, for example, then you are applying 40 GB of RAM total for the function. You should always make sure to factor parallelism---i.e. the number of instances---into your resource calculations
-
-## Triggering Pulsar Functions
-
-If a Pulsar Function is running in [cluster mode](#cluster-mode), you can **trigger** it at any time using the command line. Triggering a function means that you send a message with a specific value to the function and get the function's output (if any) via the command line.
-
-> Triggering a function is ultimately no different from invoking a function by producing a message on one of the function's input topics. The [`pulsar-admin functions trigger`](reference-pulsar-admin.md#trigger) command is essentially a convenient mechanism for sending messages to functions without needing to use the [`pulsar-client`](reference-cli-tools.md#pulsar-client) tool or a language-specific client library.
-
-To show an example of function triggering, let's start with a simple [Python function](functions-api.md#functions-for-python) that returns a simple string based on the input:
-
-```python
-
-# myfunc.py
-def process(input):
-    return "This function has been triggered with a value of {0}".format(input)
-
-```
-
-Let's run that function in [cluster mode](functions-deploying.md#cluster-mode):
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --tenant public \
-  --namespace default \
-  --name myfunc \
-  --py myfunc.py \
-  --classname myfunc \
-  --inputs persistent://public/default/in \
-  --output persistent://public/default/out
-
-```
-
-Now let's make a consumer listen on the output topic for messages coming from the `myfunc` function using the [`pulsar-client consume`](reference-cli-tools.md#consume) command:
-
-```bash
-
-$ bin/pulsar-client consume persistent://public/default/out \
-  --subscription-name my-subscription \
-  --num-messages 0 # Listen indefinitely
-
-```
-
-Now let's trigger that function:
-
-```bash
-
-$ bin/pulsar-admin functions trigger \
-  --tenant public \
-  --namespace default \
-  --name myfunc \
-  --trigger-value "hello world"
-
-```
-
-The consumer listening on the output topic should then produce this in its logs:
-
-```
-
------ got message -----
-This function has been triggered with a value of hello world
-
-```
-
-> #### Topic info not required
-> In the `trigger` command above, you may have noticed that you only need to specify basic information about the function (tenant, namespace, and name). To trigger the function, you didn't need to know the function's input topic(s).
diff --git a/site2/website-next/versioned_docs/version-2.2.1/functions-guarantees.md b/site2/website-next/versioned_docs/version-2.2.1/functions-guarantees.md
index d9b1438..aefa8d3 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/functions-guarantees.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/functions-guarantees.md
@@ -2,46 +2,4 @@
 id: functions-guarantees
 title: Processing guarantees
 sidebar_label: "Processing guarantees"
-original_id: functions-guarantees
 ---
-
-Pulsar Functions provides three different messaging semantics that you can apply to any function:
-
-Delivery semantics | Description
-:------------------|:-------
-**At-most-once** delivery | Each message that is sent to the function will most likely be processed but also may not be (hence the "at most")
-**At-least-once** delivery | Each message that is sent to the function could be processed more than once (hence the "at least")
-**Effectively-once** delivery | Each message that is sent to the function will have one output associated with it
-
-## Applying processing guarantees to a function
-
-You can set the processing guarantees for a Pulsar Function when you create the Function. This [`pulsar-function create`](reference-pulsar-admin.md#create-1) command, for example, would apply effectively-once guarantees to the Function:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --processing-guarantees EFFECTIVELY_ONCE \
-  # Other function configs
-
-```
-
-The available options are:
-
-* `ATMOST_ONCE`
-* `ATLEAST_ONCE`
-* `EFFECTIVELY_ONCE`
-
-> By default, Pulsar Functions provide at-least-once delivery guarantees. So if you create a function without supplying a value for the `--processing-guarantees` flag, then the function will provide at-least-once guarantees.
-
-## Updating the processing guarantees of a function
-
-You can change the processing guarantees applied to a function once it's already been created using the [`update`](reference-pulsar-admin.md#update-1) command. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --processing-guarantees ATMOST_ONCE \
-  # Other function configs
-
-```
-
diff --git a/site2/website-next/versioned_docs/version-2.2.1/functions-metrics.md b/site2/website-next/versioned_docs/version-2.2.1/functions-metrics.md
index 8add669..e76c556 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/functions-metrics.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/functions-metrics.md
@@ -2,6 +2,5 @@
 id: functions-metrics
 title: Metrics for Pulsar Functions
 sidebar_label: "Metrics"
-original_id: functions-metrics
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/functions-quickstart.md b/site2/website-next/versioned_docs/version-2.2.1/functions-quickstart.md
index d4d4aff..722d5bc 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/functions-quickstart.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/functions-quickstart.md
@@ -1,458 +1,5 @@
 ---
 id: functions-quickstart
-title: Get started with Pulsar Functions
-sidebar_label: "Get started"
-original_id: functions-quickstart
----
-
-This tutorial walks you through running a [standalone](reference-terminology.md#standalone) Pulsar [cluster](reference-terminology.md#cluster) on your machine, and then running your first Pulsar Function using that cluster. The first Pulsar Function runs in local run mode (outside your Pulsar [cluster](reference-terminology.md#cluster)), while the second runs in cluster mode (inside your cluster).
-
-> In local run mode, Pulsar Functions communicate with Pulsar cluster, but run outside of the cluster.
-
-## Prerequisites
-
-Install [Maven](https://maven.apache.org/download.cgi) on your machine.
-
-## Run a standalone Pulsar cluster
-
-In order to run Pulsar Functions, you need to run a Pulsar cluster locally first. The easiest way is to run Pulsar in [standalone](reference-terminology.md#standalone) mode. Follow these steps to start up a standalone cluster.
-
-```bash
-
-$ wget pulsar:binary_release_url
-$ tar xvfz apache-pulsar-@pulsar:version@-bin.tar.gz
-$ cd apache-pulsar-@pulsar:version@
-$ bin/pulsar standalone \
-  --advertised-address 127.0.0.1
-
-```
-
-When running Pulsar in standalone mode, the `public` tenant and the `default` namespace are created automatically. The tenant and namespace are used throughout this tutorial.
-
-## Run a Pulsar Function in local run mode
-
-You can start with a simple function that takes a string as input from a Pulsar topic, adds an exclamation point to the end of the string, and then publishes the new string to another Pulsar topic. The following is the code for the function.
-
-```java
-
-package org.apache.pulsar.functions.api.examples;
-
-import java.util.function.Function;
-
-public class ExclamationFunction implements Function<String, String> {
-    @Override
-    public String apply(String input) {
-        return String.format("%s!", input);
-    }
-}
-
-```
-
-A JAR file containing this function and several other functions (written in Java) is included with the binary distribution you have downloaded (in the `examples` folder). Run the function in local mode on your laptop but outside your Pulsar cluster with the following commands.
-
-```bash
-
-$ bin/pulsar-admin functions localrun \
-  --jar examples/api-examples.jar \
-  --classname org.apache.pulsar.functions.api.examples.ExclamationFunction \
-  --inputs persistent://public/default/exclamation-input \
-  --output persistent://public/default/exclamation-output \
-  --name exclamation
-
-```
-
-> #### Multiple input topics
->
-> In the example above, a single topic is specified using the `--inputs` flag. You can also specify multiple input topics with a comma-separated list using the same flag. 
->
-
-> ```bash
-> 
-> --inputs topic1,topic2
->
-> 
-> ```
-
-
-You can open up another shell and use the [`pulsar-client`](reference-cli-tools.md#pulsar-client) tool to listen for messages on the output topic.
-
-```bash
-
-$ bin/pulsar-client consume persistent://public/default/exclamation-output \
-  --subscription-name my-subscription \
-  --num-messages 0
-
-```
-
-> Setting the `--num-messages` flag to `0` means that consumers listen on the topic indefinitely, rather than only accepting a certain number of messages.
-
-With a listener up and running, you can open up another shell and produce a message on the input topic that you specify.
-
-```bash
-
-$ bin/pulsar-client produce persistent://public/default/exclamation-input \
-  --num-produce 1 \
-  --messages "Hello world"
-
-```
-
-When the message has been successfully processed by the exclamation function, you will see the following output. To shut down the function, press **Ctrl+C**.
-
-```
-
------ got message -----
-Hello world!
-
-```
-
-### Process explanation
-
-* The `Hello world` message you publish to the input topic (`persistent://public/default/exclamation-input`) is passed to the exclamation function.
-* The exclamation function processes the message (providing a result of `Hello world!`) and publishes the result to the output topic (`persistent://public/default/exclamation-output`).
-* If the exclamation function *does not* run, Pulsar will durably store the message data published to the input topic in [Apache BookKeeper](https://bookkeeper.apache.org) until a consumer consumes and acknowledges the message.
-
-## Run a Pulsar Function in cluster mode
-
-[Local run mode](#run-a-pulsar-function-in-local-run-mode) is useful for development and test. However, when you use Pulsar for real deployment, you run it in **cluster mode**. In cluster mode, Pulsar Functions run *inside* of your Pulsar cluster and are managed using the same [`pulsar-admin functions`](reference-pulsar-admin.md#functions) interface.
-
-The following command deploys the same exclamation function you run locally in your Pulsar cluster, rather than outside of it.
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --jar examples/api-examples.jar \
-  --classname org.apache.pulsar.functions.api.examples.ExclamationFunction \
-  --inputs persistent://public/default/exclamation-input \
-  --output persistent://public/default/exclamation-output \
-  --name exclamation
-
-```
-
-You will see `Created successfully` in the output. Check the list of functions running in your cluster.
-
-```bash
-
-$ bin/pulsar-admin functions list \
-  --tenant public \
-  --namespace default
-
-```
-
-You will see the `exclamation` function. Check the status of your deployed function using the `getstatus` command.
-
-```bash
-
-$ bin/pulsar-admin functions getstatus \
-  --tenant public \
-  --namespace default \
-  --name exclamation
-
-```
-
-You will see the following JSON output.
-
-```json
-
-{
-  "functionStatusList": [
-    {
-      "running": true,
-      "instanceId": "0"
-    }
-  ]
-}
-
-```
-
-As you can see, the instance is currently running, and an instance with the ID of `0` is running. With the `get` command, you can get other information about the function, for example, topics, tenant, namespace, and so on.
-
-```bash
-
-$ bin/pulsar-admin functions get \
-  --tenant public \
-  --namespace default \
-  --name exclamation
-
-```
-
-You will see the following JSON output.
-
-```json
-
-{
-  "tenant": "public",
-  "namespace": "default",
-  "name": "exclamation",
-  "className": "org.apache.pulsar.functions.api.examples.ExclamationFunction",
-  "output": "persistent://public/default/exclamation-output",
-  "autoAck": true,
-  "inputs": [
-    "persistent://public/default/exclamation-input"
-  ],
-  "parallelism": 1
-}
-
-```
-
-As you can see, only one instance of the function is running in your cluster. Update the parallel functions to `3` using the `update` command.
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --jar examples/api-examples.jar \
-  --classname org.apache.pulsar.functions.api.examples.ExclamationFunction \
-  --inputs persistent://public/default/exclamation-input \
-  --output persistent://public/default/exclamation-output \
-  --tenant public \
-  --namespace default \
-  --name exclamation \
-  --parallelism 3
-
-```
-
-You will see `Updated successfully` in the output. If you enter the `get` command, you see that the parallel functions are increased to `3`, meaning that three instances of the function are running in your cluster.
-
-```json
-
-{
-  "tenant": "public",
-  "namespace": "default",
-  "name": "exclamation",
-  "className": "org.apache.pulsar.functions.api.examples.ExclamationFunction",
-  "output": "persistent://public/default/exclamation-output",
-  "autoAck": true,
-  "inputs": [
-    "persistent://public/default/exclamation-input"
-  ],
-  "parallelism": 3
-}
-
-```
-
-Shut down the running function with the `delete` command.
-
-```bash
-
-$ bin/pulsar-admin functions delete \
-  --tenant public \
-  --namespace default \
-  --name exclamation
-
-```
-
-When you see `Deleted successfully` in the output, you've successfully run, updated, and shut down functions running in cluster mode. 
-
-## Write and run a new function
-
-In order to write and run [Python](functions-api.md#functions-for-python) functions, you need to install some dependencies.
-
-```bash
-
-$ pip install pulsar-client
-
-```
-
-In the examples above, you run and manage pre-written Pulsar Functions and learn how they work. You can also write your own functions with Python API. In the following example, the function takes a string as input, reverses the string, and publishes the reversed string to the specified topic.
-
-First, create a new Python file.
-
-```bash
-
-$ touch reverse.py
-
-```
-
-Add the following information in the Python file.
-
-```python
-
-def process(input):
-    return input[::-1]
-
-```
-
-The `process` method defines the processing logic of Pulsar Functions. It uses Python slice magic to reverse each incoming string. You can deploy the function using the `create` command.
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --py reverse.py \
-  --classname reverse \
-  --inputs persistent://public/default/backwards \
-  --output persistent://public/default/forwards \
-  --tenant public \
-  --namespace default \
-  --name reverse
-
-```
-
-If you see `Created successfully`, the function is ready to accept incoming messages. Because the function is running in cluster mode, you can **trigger** the function using the [`trigger`](reference-pulsar-admin.md#trigger) command. This command sends a message that you specify to the function and returns the function output. The following is an example.
-
-```bash
-
-$ bin/pulsar-admin functions trigger \
-  --name reverse \
-  --tenant public \
-  --namespace default \
-  --trigger-value "sdrawrof won si tub sdrawkcab saw gnirts sihT"
-
-```
-
-You will get the following output.
-
-```
-
-This string was backwards but is now forwards
-
-```
-
-You have created a new Pulsar Function, deployed it in your Pulsar standalone cluster in [cluster mode](#run-a-pulsar-function-in-cluster-mode), and triggered the Function. 
-
-## Write and run a Go function
-Go function depends on `pulsar-client-go`. Make sure that you have built `pulsar-client-go` before using Go function.
-
-To write and run a Go function, complete the following steps.
-
-1. Create a new Go file.
-
-```
-
-touch helloFunc.go
-
-```
-
-2. Append a byte for messages from the input topic.    
-The following is a `helloFunc.go` example. Each message from the input topic is appended with a `110` byte, and then delivered to the output topic.
-
-```
-
-package main
-
-import (
-	"context"
-
-	"github.com/apache/pulsar/pulsar-function-go/pf"
-)
-
-func HandleResponse(ctx context.Context, in []byte) ([]byte, error) {
-	res := append(in, 110)
-	return res, nil
-}
-
-func main() {
-	pf.Start(HandleResponse)
-}
-
-```
-
-3. Compile code.
-
-```
-
-go build -o examplepulsar helloFunc.go
-
-```
-
-4. Run Go function. 
-
-```
-
-$ bin/pulsar-admin functions create \
-  --go examplepulsar \
-  --inputs persistent://public/default/backwards \
-  --output persistent://public/default/forwards \
-  --tenant public \
-  --namespace default \
-  --name gofunc
-
-```
-
-If you see `Created successfully`, the function is ready to accept incoming messages. Start a producer and produce messages to the `backwards` input topic. Start a consumer and consume messages from the `forwards` output topic, you will see `110` is appended to all messages.
-
-The `--classname` parameter is not specified when running Go function, because there is no `Class` concept in Go, which is different from Java and Python.
-
-:::note
-
-When you use the `--go` command to specify an executable file, make sure you have executable permissions.
-
-:::
-
-## Package Python dependencies
-
-When you deploy Python functions in a cluster offline, you need to package the required dependencies in a ZIP file before deployment.
-
-### Client requirements
-
-The following programs are required to be installed on the client machine.
-
-```
-
-pip \\ required for getting python dependencies
-zip \\ for building zip archives
-
-```
-
-### Python dependencies
-
-A file named **requirements.txt** is needed with required dependencies for the Python function.
-
-```
-
-sh==1.12.14
-
-```
-
-Prepare the Pulsar Function in the **src** folder.
-
-Run the following command to gather Python dependencies in the **deps** folder.
-
-```
-
-pip download \
---only-binary :all: \
---platform manylinux1_x86_64 \
---python-version 27 \
---implementation cp \
---abi cp27m -r requirements.txt -d deps
-
-```
-
-Sample output
-
-```
-
-Collecting sh==1.12.14 (from -r requirements.txt (line 1))
-  Using cached https://files.pythonhosted.org/packages/4a/22/17b22ef5b049f12080f5815c41bf94de3c229217609e469001a8f80c1b3d/sh-1.12.14-py2.py3-none-any.whl
-  Saved ./deps/sh-1.12.14-py2.py3-none-any.whl
-Successfully downloaded sh
-
-```
-
-:::note
-
-`pulsar-client` is not needed as a dependency as it has already installed in the worker node.
-
-:::
-
-#### Package
-Create a destination folder with the desired package name, for example, **exclamation**. Copy the **src** and **deps** folders into it, and compress the folder into a ZIP archive.
-
-Sample sequence
-
-```
-
-cp -R deps exclamation/
-cp -R src exclamation/
-
-ls -la exclamation/
-total 7
-drwxr-xr-x   5 a.ahmed  staff  160 Nov  6 17:51 .
-drwxr-xr-x  12 a.ahmed  staff  384 Nov  6 17:52 ..
-drwxr-xr-x   3 a.ahmed  staff   96 Nov  6 17:51 deps
-drwxr-xr-x   3 a.ahmed  staff   96 Nov  6 17:51 src
-
-zip -r exclamation.zip exclamation
-
-```
-
-After package all the required dependencies into the **exclamation.zip** file, you can deploy functions in a Pulsar worker. The Pulsar worker does not need internet connectivity to download packages, because they are all included in the ZIP file.
\ No newline at end of file
+title: Getting started with Pulsar Functions
+sidebar_label: "Getting started"
+---
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.2.1/functions-state.md b/site2/website-next/versioned_docs/version-2.2.1/functions-state.md
index d3c7c78..a858759 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/functions-state.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/functions-state.md
@@ -2,196 +2,4 @@
 id: functions-state
 title: Pulsar Functions State Storage (Developer Preview)
 sidebar_label: "State Storage"
-original_id: functions-state
 ---
-
-Since Pulsar 2.1.0 release, Pulsar integrates with Apache BookKeeper [table service](https://docs.google.com/document/d/155xAwWv5IdOitHh1NVMEwCMGgB28M3FyMiQSxEpjE-Y/edit#heading=h.56rbh52koe3f)
-for storing the `State` for functions. For example, A `WordCount` function can store its `counters` state into BookKeeper's table service via Pulsar Functions [State API](#api).
-
-## API
-
-### Java API
-
-Currently Pulsar Functions expose following APIs for mutating and accessing State. These APIs are available in the [Context](functions-api.md#context) object when
-you are using [Java SDK](functions-api.md#java-sdk-functions) functions.
-
-#### incrCounter
-
-```java
-
-    /**
-     * Increment the builtin distributed counter referred by key
-     * @param key The name of the key
-     * @param amount The amount to be incremented
-     */
-    void incrCounter(String key, long amount);
-
-```
-
-The application can use `incrCounter` to change the counter of a given `key` by the given `amount`.
-
-#### incrCounterAsync
-
-```java
-
-     /**
-     * Increment the builtin distributed counter referred by key
-     * but dont wait for the completion of the increment operation
-     *
-     * @param key The name of the key
-     * @param amount The amount to be incremented
-     */
-    CompletableFuture<Void> incrCounterAsync(String key, long amount);
-
-```
-
-The application can use `incrCounterAsync` to asynchronously change the counter of a given `key` by the given `amount`.
-
-#### getCounter
-
-```java
-
-    /**
-     * Retrieve the counter value for the key.
-     *
-     * @param key name of the key
-     * @return the amount of the counter value for this key
-     */
-    long getCounter(String key);
-
-```
-
-The application can use `getCounter` to retrieve the counter of a given `key` mutated by `incrCounter`.
-
-Besides the `counter` API, Pulsar also exposes a general key/value API for functions to store
-general key/value state.
-
-#### getCounterAsync
-
-```java
-
-     /**
-     * Retrieve the counter value for the key, but don't wait
-     * for the operation to be completed
-     *
-     * @param key name of the key
-     * @return the amount of the counter value for this key
-     */
-    CompletableFuture<Long> getCounterAsync(String key);
-
-```
-
-The application can use `getCounterAsync` to asynchronously retrieve the counter of a given `key` mutated by `incrCounterAsync`.
-
-#### putState
-
-```java
-
-    /**
-     * Update the state value for the key.
-     *
-     * @param key name of the key
-     * @param value state value of the key
-     */
-    void putState(String key, ByteBuffer value);
-
-```
-
-#### putStateAsync
-
-```java
-
-    /**
-     * Update the state value for the key, but don't wait for the operation to be completed
-     *
-     * @param key name of the key
-     * @param value state value of the key
-     */
-    CompletableFuture<Void> putStateAsync(String key, ByteBuffer value);
-
-```
-
-The application can use `putStateAsync` to asynchronously update the state of a given `key`.
-
-#### getState
-
-```
-
-    /**
-     * Retrieve the state value for the key.
-     *
-     * @param key name of the key
-     * @return the state value for the key.
-     */
-    ByteBuffer getState(String key);
-
-```
-
-#### getStateAsync
-
-```java
-
-    /**
-     * Retrieve the state value for the key, but don't wait for the operation to be completed
-     *
-     * @param key name of the key
-     * @return the state value for the key.
-     */
-    CompletableFuture<ByteBuffer> getStateAsync(String key);
-
-```
-
-The application can use `getStateAsync` to asynchronously retrieve the state of a given `key`.
-
-### Python API
-
-State currently is not supported at [Python SDK](functions-api.md#python-sdk-functions).
-
-## Query State
-
-A Pulsar Function can use the [State API](#api) for storing state into Pulsar's state storage
-and retrieving state back from Pulsar's state storage. Additionally Pulsar also provides
-CLI commands for querying its state.
-
-```shell
-
-$ bin/pulsar-admin functions querystate \
-    --tenant <tenant> \
-    --namespace <namespace> \
-    --name <function-name> \
-    --state-storage-url <bookkeeper-service-url> \
-    --key <state-key> \
-    [---watch]
-
-```
-
-If `--watch` is specified, the CLI will watch the value of the provided `state-key`.
-
-## Example
-
-### Java Example
-
-{@inject: github:WordCountFunction:/pulsar-functions/java-examples/src/main/java/org/apache/pulsar/functions/api/examples/WordCountFunction.java} is a very good example
-demonstrating on how Application can easily store `state` in Pulsar Functions.
-
-```java
-
-public class WordCountFunction implements Function<String, Void> {
-    @Override
-    public Void process(String input, Context context) throws Exception {
-        Arrays.asList(input.split("\\.")).forEach(word -> context.incrCounter(word, 1));
-        return null;
-    }
-}
-
-```
-
-The logic of this `WordCount` function is pretty simple and straightforward:
-
-1. The function first splits the received `String` into multiple words using regex `\\.`.
-2. For each `word`, the function increments the corresponding `counter` by 1 (via `incrCounter(key, amount)`).
-
-### Python Example
-
-State currently is not supported at [Python SDK](functions-api.md#python-sdk-functions).
-
diff --git a/site2/website-next/versioned_docs/version-2.2.1/io-develop.md b/site2/website-next/versioned_docs/version-2.2.1/io-develop.md
index 2328f37..bbd6501 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/io-develop.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/io-develop.md
@@ -2,7 +2,6 @@
 id: io-develop
 title: How to develop Pulsar connectors
 sidebar_label: "Develop"
-original_id: io-develop
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.1/pulsar-2.0.md b/site2/website-next/versioned_docs/version-2.2.1/pulsar-2.0.md
index 11c5e66..560c8c1 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/pulsar-2.0.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/pulsar-2.0.md
@@ -2,7 +2,6 @@
 id: pulsar-2.0
 title: Pulsar 2.0
 sidebar_label: "Pulsar 2.0"
-original_id: pulsar-2.0
 ---
 
 Pulsar 2.0 is a major new release for Pulsar that brings some bold changes to the platform, including [simplified topic names](#topic-names), the addition of the [Pulsar Functions](functions-overview) feature, some terminology changes, and more.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/reference-pulsar-admin.md b/site2/website-next/versioned_docs/version-2.2.1/pulsar-admin.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.1/reference-pulsar-admin.md
rename to site2/website-next/versioned_docs/version-2.2.1/pulsar-admin.md
diff --git a/site2/website-next/versioned_docs/version-2.2.1/reference-cli-tools.md b/site2/website-next/versioned_docs/version-2.2.1/reference-cli-tools.md
index 3a46361..0c8aea1 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/reference-cli-tools.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/reference-cli-tools.md
@@ -2,7 +2,6 @@
 id: reference-cli-tools
 title: Pulsar command-line tools
 sidebar_label: "Pulsar CLI tools"
-original_id: reference-cli-tools
 ---
 
 Pulsar offers several command-line tools that you can use for managing Pulsar installations, performance testing, using command-line producers and consumers, and more.
@@ -16,8 +15,12 @@ All Pulsar command-line tools can be run from the `bin` directory of your [insta
 * [`bookkeeper`](#bookkeeper)
 * [`broker-tool`](#broker-tool)
 
-> ### Getting help
-> You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> **Important** 
+>
+> - This page only shows **some frequently used commands**. For the latest information about `pulsar`, `pulsar-client`, and `pulsar-perf`, including commands, flags, descriptions, and more information, see [Pulsar tools](https://pulsar.apache.org/tools/).
+>  
+> - You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> 
 
 > ```shell
 > 
@@ -45,7 +48,6 @@ Commands:
 * `bookie`
 * `broker`
 * `compact-topic`
-* `discovery`
 * `configuration-store`
 * `initialize-cluster-metadata`
 * `proxy`
@@ -53,6 +55,7 @@ Commands:
 * `websocket`
 * `zookeeper`
 * `zookeeper-shell`
+* `autorecovery`
 
 Example:
 
@@ -71,14 +74,13 @@ The table below lists the environment variables that you can use to configure th
 |`PULSAR_BOOKKEEPER_CONF`|description: Configuration file for bookie|`conf/bookkeeper.conf`|
 |`PULSAR_ZK_CONF`|Configuration file for zookeeper|`conf/zookeeper.conf`|
 |`PULSAR_CONFIGURATION_STORE_CONF`|Configuration file for the configuration store|`conf/global_zookeeper.conf`|
-|`PULSAR_DISCOVERY_CONF`|Configuration file for discovery service|`conf/discovery.conf`|
 |`PULSAR_WEBSOCKET_CONF`|Configuration file for websocket proxy|`conf/websocket.conf`|
 |`PULSAR_STANDALONE_CONF`|Configuration file for standalone|`conf/standalone.conf`|
 |`PULSAR_EXTRA_OPTS`|Extra options to be passed to the jvm||
 |`PULSAR_EXTRA_CLASSPATH`|Extra paths for Pulsar's classpath||
 |`PULSAR_PID_DIR`|Folder where the pulsar server PID file should be stored||
 |`PULSAR_STOP_TIMEOUT`|Wait time before forcefully killing the Bookie server instance if attempts to stop it are not successful||
-
+|`PULSAR_GC_LOG`|Gc options to be passed to the jvm||
 
 
 ### `bookie`
@@ -165,26 +167,6 @@ $ pulsar compact-topic --topic topic-to-compact
 
 ```
 
-### `discovery`
-
-Run a discovery server
-
-Usage
-
-```bash
-
-$ pulsar discovery
-
-```
-
-Example
-
-```bash
-
-$ PULSAR_DISCOVERY_CONF=/path/to/discovery.conf pulsar discovery
-
-```
-
 ### `configuration-store`
 
 Starts up the Pulsar configuration store
@@ -224,14 +206,14 @@ Options
 |`-ub` , `--broker-service-url`|The broker service URL for the new cluster||
 |`-tb` , `--broker-service-url-tls`|The broker service URL for the new cluster with TLS encryption||
 |`-c` , `--cluster`|Cluster name||
-|`-cs` , `--configuration-store`|The configuration store quorum connection string||
+|`-cms` , `--configuration-metadata-store`|The configuration metadata store quorum connection string||
 |`--existing-bk-metadata-service-uri`|The metadata service URI of the existing BookKeeper cluster that you want to use||
 |`-h` , `--help`|Cluster name|false|
 |`--initial-num-stream-storage-containers`|The number of storage containers of BookKeeper stream storage|16|
 |`--initial-num-transaction-coordinators`|The number of transaction coordinators assigned in a cluster|16|
 |`-uw` , `--web-service-url`|The web service URL for the new cluster||
 |`-tw` , `--web-service-url-tls`|The web service URL for the new cluster with TLS encryption||
-|`-zk` , `--zookeeper`|The local ZooKeeper quorum connection string||
+|`-md` , `--metadata-store`|The metadata store service url||
 |`--zookeeper-session-timeout-ms`|The local ZooKeeper session timeout. The time unit is in millisecond(ms)|30000|
 
 
@@ -355,6 +337,23 @@ Options
 |`-c`, `--conf`|Configuration file for ZooKeeper||
 |`-server`|Configuration zk address, eg: `127.0.0.1:2181`||
 
+### `autorecovery`
+
+Runs an auto-recovery service.
+
+Usage
+
+```bash
+
+$ pulsar autorecovery options
+
+```
+
+Options
+
+|Flag|Description|Default|
+|---|---|---|
+|`-c`, `--conf`|Configuration for the autorecovery|N/A|
 
 
 ## `pulsar-client`
@@ -407,6 +406,7 @@ Options
 |`-m`, `--messages`|Comma-separated string of messages to send; either -m or -f must be specified|[]|
 |`-n`, `--num-produce`|The number of times to send the message(s); the count of messages/files * num-produce should be below 1000|1|
 |`-r`, `--rate`|Rate (in messages per second) at which to produce; a value 0 means to produce messages as fast as possible|0.0|
+|`-db`, `--disable-batching`|Disable batch sending of messages|false|
 |`-c`, `--chunking`|Split the message and publish in chunks if the message size is larger than the allowed max size|false|
 |`-s`, `--separator`|Character to split messages string with.|","|
 |`-k`, `--key`|Message key to add|key=value string, like k1=v1,k2=v2.|
@@ -462,6 +462,7 @@ $ pulsar-daemon command
 Commands
 * `start`
 * `stop`
+* `restart`
 
 
 ### `start`
@@ -492,7 +493,14 @@ Options
 |---|---|---|
 |-force|Stop the service forcefully if not stopped by normal shutdown.|false|
 
+### `restart`
+Restart a service that has already been started.
 
+```bash
+
+$ pulsar-daemon restart service
+
+```
 
 ## `pulsar-perf`
 A tool for performance testing a Pulsar broker.
@@ -514,6 +522,7 @@ Commands
 * `monitor-brokers`
 * `simulation-client`
 * `simulation-controller`
+* `transaction`
 * `help`
 
 Environment variables
@@ -526,6 +535,7 @@ The table below lists the environment variables that you can use to configure th
 |`PULSAR_CLIENT_CONF`|Configuration file for the client|conf/client.conf|
 |`PULSAR_EXTRA_OPTS`|Extra options to be passed to the JVM||
 |`PULSAR_EXTRA_CLASSPATH`|Extra paths for Pulsar's classpath||
+|`PULSAR_GC_LOG`|Gc options to be passed to the jvm||
 
 
 ### `consume`
@@ -544,7 +554,7 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`-ac`, `--auto_ack_chunk_q_full`|Auto ack for the oldest message in consumer's receiver queue if the queue full|false|
 |`--listener-name`|Listener name for the broker||
 |`--acks-delay-millis`|Acknowledgements grouping delay in millis|100|
@@ -553,11 +563,13 @@ Options
 |`-v`, `--encryption-key-value-file`|The file which contains the private key to decrypt payload||
 |`-h`, `--help`|Help message|false|
 |`--conf-file`|Configuration file||
+|`-m`, `--num-messages`|Number of messages to consume in total. If the value is equal to or smaller than 0, it keeps consuming messages.|0|
 |`-e`, `--expire_time_incomplete_chunked_messages`|The expiration time for incomplete chunk messages (in milliseconds)|0|
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-mc`, `--max_chunked_msg`|Max pending chunk messages|0|
 |`-n`, `--num-consumers`|Number of consumers (per topic)|1|
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
+|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1|
 |`-ns`, `--num-subscriptions`|Number of subscriptions (per topic)|1|
 |`-t`, `--num-topics`|The number of topics|1|
 |`-pm`, `--pool-messages`|Use the pooled message|true|
@@ -571,10 +583,21 @@ Options
 |`-ss`, `--subscriptions`|A list of subscriptions to consume on (e.g. sub1,sub2)|sub|
 |`-st`, `--subscription-type`|Subscriber type. Possible values are Exclusive, Shared, Failover, Key_Shared.|Exclusive|
 |`-sp`, `--subscription-position`|Subscriber position. Possible values are Latest, Earliest.|Latest|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps consuming messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps consuming messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--tls-allow-insecure`|Allow insecure TLS connection||
 
+Below are **transaction** related options.
+
+If you want `--txn-timeout`, `--numMessage-perTransaction`, `-nmt`, `-ntxn`, or `-abort` take effect, set `--txn-enable` to true.
+
+|Flag|Description|Default|
+|---|---|---|
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). |10
+`-nmt`, `--numMessage-perTransaction`|The number of messages acknowledged by a transaction. |50
+`-txn`, `--txn-enable`|Enable or disable a transaction.|false
+`-ntxn`|The number of opened transactions. 0 means the number of transactions is unlimited. |0
+`-abort`|Abort a transaction. |true
 
 ### `produce`
 Run a producer
@@ -594,7 +617,7 @@ Options
 |`-am`, `--access-mode`|Producer access mode. Valid values are `Shared`, `Exclusive` and `WaitForExclusive`|Shared|
 |`-au`, `--admin-url`|Pulsar admin URL||
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--listener-name`|Listener name for the broker||
 |`-b`, `--batch-time-window`|Batch messages in a window of the specified number of milliseconds|1|
 |`-bb`, `--batch-max-bytes`|Maximum number of bytes per batch|4194304|
@@ -613,9 +636,9 @@ Options
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-o`, `--max-outstanding`|Max number of outstanding messages|1000|
 |`-p`, `--max-outstanding-across-partitions`|Max number of outstanding messages across partitions|50000|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-mk`, `--message-key-generation-mode`|The generation mode of message key. Valid options are `autoIncrement`, `random`||
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages.|0|
 |`-n`, `--num-producers`|The number of producers (per topic)|1|
 |`-threads`, `--num-test-threads`|Number of test threads|1|
 |`-t`, `--num-topic`|The number of topics|1|
@@ -629,11 +652,21 @@ Options
 |`-u`, `--service-url`|Pulsar service URL||
 |`-s`, `--size`|Message size (in bytes)|1024|
 |`-i`, `--stats-interval-seconds`|Statistics interval seconds. If 0, statistics will be disabled.|0|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages.|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--warmup-time`|Warm-up time in seconds|1|
 |`--tls-allow-insecure`|Allow insecure TLS connection||
 
+Below are **transaction** related options.
+
+If you want `--txn-timeout`, `--numMessage-perTransaction`, or `-abort` take effect, set `--txn-enable` to true.
+
+|Flag|Description|Default|
+|---|---|---|
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). |5
+`-nmt`, `--numMessage-perTransaction`|The number of messages acknowledged by a transaction. |50
+`-txn`, `--txn-enable`|Enable or disable a transaction.|true
+`-abort`|Abort a transaction. |true
 
 ### `read`
 Run a topic reader
@@ -651,19 +684,21 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--listener-name`|Listener name for the broker||
 |`--conf-file`|Configuration file||
 |`-h`, `--help`|Help message|false|
+|`-n`, `--num-messages`|Number of messages to consume in total. If the value is equal to or smaller than 0, it keeps consuming messages.|0|
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
+|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1|
 |`-t`, `--num-topics`|The number of topics|1|
 |`-r`, `--rate`|Simulate a slow message reader (rate in msg/s)|0|
 |`-q`, `--receiver-queue-size`|Size of the receiver queue|1000|
 |`-u`, `--service-url`|Pulsar service URL||
 |`-m`, `--start-message-id`|Start message id. This can be either 'earliest', 'latest' or a specific message id by using 'lid:eid'|earliest|
 |`-i`, `--stats-interval-seconds`|Statistics interval seconds. If 0, statistics will be disabled.|0|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps consuming messages.|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps consuming messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--use-tls`|Use TLS encryption on the connection|false|
 |`--tls-allow-insecure`|Allow insecure TLS connection||
@@ -684,16 +719,19 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--conf-file`|Configuration file||
 |`-h`, `--help`|Help message|false|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-t`, `--num-topic`|The number of topics|1|
 |`-f`, `--payload-file`|Use payload from a file instead of empty buffer||
+|`-e`, `--payload-delimiter`|The delimiter used to split lines when using payload from a file|\n|
+|`-fp`, `--format-payload`|Format %i as a message index in the stream from producer and/or %t as the timestamp nanoseconds|false|
+|`-fc`, `--format-class`|Custom formatter class name|`org.apache.pulsar.testclient.DefaultMessageFormatter`|
 |`-u`, `--proxy-url`|Pulsar Proxy URL, e.g., "ws://localhost:8080/"||
 |`-r`, `--rate`|Publish rate msg/s across topics|100|
 |`-s`, `--size`|Message size in byte|1024|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 
 
 ### `managed-ledger`
@@ -717,11 +755,11 @@ Options
 |`-h`, `--help`|Help message|false|
 |`-c`, `--max-connections`|Max number of TCP connections to a single bookie|1|
 |`-o`, `--max-outstanding`|Max number of outstanding requests|1000|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-t`, `--num-topic`|Number of managed ledgers|1|
 |`-r`, `--rate`|Write rate msg/s across managed ledgers|100|
 |`-s`, `--size`|Message size in byte|1024|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`--threads`|Number of threads writing|1|
 |`-w`, `--write-quorum`|Ledger write quorum|1|
 |`-zk`, `--zookeeperServers`|ZooKeeper connection string||
@@ -785,6 +823,45 @@ Options
 |`--cluster`|The cluster to test on||
 |`-h`, `--help`|Help message|false|
 
+### `transaction`
+
+Run a transaction. For more information, see [Pulsar transactions](txn-why).
+
+**Usage**
+
+```bash
+
+$ pulsar-perf transaction options
+
+```
+
+**Options**
+
+|Flag|Description|Default|
+|---|---|---|
+`-au`, `--admin-url`|Pulsar admin URL.|N/A
+`--conf-file`|Configuration file.|N/A
+`-h`, `--help`|Help messages.|N/A
+`-c`, `--max-connections`|Maximum number of TCP connections to a single broker.|100
+`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers. |1
+`-ns`, `--num-subscriptions`|Number of subscriptions per topic.|1
+`-threads`, `--num-test-threads`|Number of test threads. <br /><br />This thread is for a new transaction to ack messages from consumer topics, produce messages to producer topics, and commit or abort this transaction. <br /><br /> Increasing the number of threads increases the parallelism of the performance test, consequently, it increases the intensity of the stress test.|1
+`-nmc`, `--numMessage-perTransaction-consume`|Set the number of messages consumed in a transaction. <br /><br /> If transaction is disabled, it means the number of messages consumed in a task instead of in a transaction.|1
+`-nmp`, `--numMessage-perTransaction-produce`|Set the number of messages produced in a transaction. <br /><br />If transaction is disabled, it means the number of messages produced in a task instead of in a transaction.|1
+`-ntxn`, `--number-txn`|Set the number of transactions. <br /><br /> 0 means the number of transactions is unlimited. <br /><br /> If transaction is disabled, it means the number of tasks instead of transactions. |0
+`-np`, `--partitions`|Create partitioned topics with a given number of partitions. <br /><br /> 0 means not trying to create a topic.
+`-q`, `--receiver-queue-size`|Size of the receiver queue.|1000
+`-u`, `--service-url`|Pulsar service URL.|N/A
+`-sp`, `--subscription-position`|Subscription position.|Earliest
+`-st`, `--subscription-type`|Subscription type.|Shared
+`-ss`, `--subscriptions`|A list of subscriptions to consume. <br /><br /> For example, sub1,sub2.|[sub]
+`-time`, `--test-duration`|Test duration (in second). <br /><br /> 0 means keeping publishing messages.|0
+`--topics-c`|All topics assigned to consumers.|[test-consume]
+`--topics-p`|All topics assigned to producers . |[test-produce]
+`--txn-disEnable`|Disable transaction.|true
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). <br /><br /> If you want `--txn-timeout` takes effect, set `--txn-enable` to true.|5
+`-abort`|Abort the transaction. <br /><br /> If you want `-abort` takes effect, set `--txn-disEnable` to false.|true
+`-txnRate`|Set the rate of opened transactions or tasks. <br /><br /> 0 means no limit.|0
 
 ### `help`
 This help message
@@ -829,9 +906,10 @@ The table below lists the environment variables that you can use to configure th
 |ENTRY_FORMATTER_CLASS|The Java class used to format entries||
 |BOOKIE_PID_DIR|Folder where the BookKeeper server PID file should be stored||
 |BOOKIE_STOP_TIMEOUT|Wait time before forcefully killing the Bookie server instance if attempts to stop it are not successful||
+|BOOKIE_GC_LOG|GC options to be passed to the JVM||
 
 
-### `auto-recovery`
+### `autorecovery`
 Runs an auto-recovery service
 
 Usage
diff --git a/site2/website-next/versioned_docs/version-2.2.1/reference-terminology.md b/site2/website-next/versioned_docs/version-2.2.1/reference-terminology.md
index d0e7368..ebc114d 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/reference-terminology.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/reference-terminology.md
@@ -2,7 +2,6 @@
 id: reference-terminology
 title: Pulsar Terminology
 sidebar_label: "Terminology"
-original_id: reference-terminology
 ---
 
 Here is a glossary of terms related to Apache Pulsar:
diff --git a/site2/website-next/versioned_docs/version-2.2.1/security-athenz.md b/site2/website-next/versioned_docs/version-2.2.1/security-athenz.md
index 947c3f4..ba27ba4 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/security-athenz.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/security-athenz.md
@@ -2,7 +2,6 @@
 id: security-athenz
 title: Authentication using Athenz
 sidebar_label: "Authentication using Athenz"
-original_id: security-athenz
 ---
 
 [Athenz](https://github.com/AthenZ/athenz) is a role-based authentication/authorization system. In Pulsar, you can use Athenz role tokens (also known as *z-tokens*) to establish the identify of the client.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/security-authorization.md b/site2/website-next/versioned_docs/version-2.2.1/security-authorization.md
index e678587..b1003d2 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/security-authorization.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/security-authorization.md
@@ -2,7 +2,6 @@
 id: security-authorization
 title: Authentication and authorization in Pulsar
 sidebar_label: "Authorization and ACLs"
-original_id: security-authorization
 ---
 
 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/security-encryption.md b/site2/website-next/versioned_docs/version-2.2.1/security-encryption.md
index cc43082..90d0dbe 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/security-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/security-encryption.md
@@ -2,7 +2,6 @@
 id: security-encryption
 title: Pulsar Encryption
 sidebar_label: "End-to-End Encryption"
-original_id: security-encryption
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.2.1/security-extending.md b/site2/website-next/versioned_docs/version-2.2.1/security-extending.md
index c088e3a..dd0030e 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/security-extending.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/security-extending.md
@@ -2,7 +2,6 @@
 id: security-extending
 title: Extending Authentication and Authorization in Pulsar
 sidebar_label: "Extending"
-original_id: security-extending
 ---
 
 Pulsar provides a way to use custom authentication and authorization mechanisms.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/security-overview.md b/site2/website-next/versioned_docs/version-2.2.1/security-overview.md
index 82a289f..91f4ba8 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/security-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/security-overview.md
@@ -2,7 +2,6 @@
 id: security-overview
 title: Pulsar security overview
 sidebar_label: "Overview"
-original_id: security-overview
 ---
 
 As the central message bus for a business, Apache Pulsar is frequently used for storing mission-critical data. Therefore, enabling security features in Pulsar is crucial.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/sql-deployment-configurations.md b/site2/website-next/versioned_docs/version-2.2.1/sql-deployment-configurations.md
index 6c6fd87..9e7ff5a 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/sql-deployment-configurations.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/sql-deployment-configurations.md
@@ -2,7 +2,6 @@
 id: sql-deployment-configurations
 title: Pulsar SQL configuration and deployment
 sidebar_label: "Configuration and deployment"
-original_id: sql-deployment-configurations
 ---
 
 You can configure Presto Pulsar connector and deploy a cluster with the following instruction.
@@ -27,6 +26,84 @@ pulsar.entry-read-batch-size=100
 # default number of splits to use per query
 pulsar.target-num-splits=4
 
+# max size of one batch message (default value is 5MB)
+pulsar.max-message-size=5242880
+
+# number of split used when querying data from pulsar
+pulsar.target-num-splits=2
+
+# size of queue to buffer entry read from pulsar
+pulsar.max-split-entry-queue-size=1000
+
+# size of queue to buffer message extract from entries
+pulsar.max-split-message-queue-size=10000
+
+# status provider to record connector metrics
+pulsar.stats-provider=org.apache.bookkeeper.stats.NullStatsProvider
+
+# config in map format for stats provider e.g. {"key1":"val1","key2":"val2"}
+pulsar.stats-provider-configs={}
+
+# whether to rewrite Pulsar's default topic delimiter '/'
+pulsar.namespace-delimiter-rewrite-enable=false
+
+# delimiter used to rewrite Pulsar's default delimiter '/', use if default is causing incompatibility with other system like Superset
+pulsar.rewrite-namespace-delimiter="/"
+
+# maximum number of thread pool size for ledger offloader.
+pulsar.managed-ledger-offload-max-threads=2
+
+# driver used to offload or read cold data to or from long-term storage
+pulsar.managed-ledger-offload-driver=null
+
+# directory to load offloaders nar file.
+pulsar.offloaders-directory="./offloaders"
+
+# properties and configurations related to specific offloader implementation as map e.g. {"key1":"val1","key2":"val2"}
+pulsar.offloader-properties={}
+
+# authentication plugin used to authenticate to Pulsar cluster
+pulsar.auth-plugin=null
+
+# authentication parameter used to authenticate to the Pulsar cluster as a string e.g. "key1:val1,key2:val2".
+pulsar.auth-params=null
+
+# whether the Pulsar client accept an untrusted TLS certificate from broker
+pulsar.tls-allow-insecure-connection=null
+
+# whether to allow hostname verification when a client connects to broker over TLS.
+pulsar.tls-hostname-verification-enable=null
+
+# path for the trusted TLS certificate file of Pulsar broker
+pulsar.tls-trust-cert-file-path=null
+
+# set the threshold for BookKeeper request throttle, default is disabled
+pulsar.bookkeeper-throttle-value=0
+
+# set the number of IO thread
+pulsar.bookkeeper-num-io-threads=2 * Runtime.getRuntime().availableProcessors()
+
+# set the number of worker thread
+pulsar.bookkeeper-num-worker-threads=Runtime.getRuntime().availableProcessors()
+
+# whether to use BookKeeper V2 wire protocol
+pulsar.bookkeeper-use-v2-protocol=true
+
+# interval to check the need for sending an explicit LAC, default is disabled
+pulsar.bookkeeper-explicit-interval=0
+
+# size for managed ledger entry cache (in MB).
+pulsar.managed-ledger-cache-size-MB=0
+
+# number of threads to be used for managed ledger tasks dispatching
+pulsar.managed-ledger-num-worker-threads=Runtime.getRuntime().availableProcessors()
+
+# number of threads to be used for managed ledger scheduled tasks
+pulsar.managed-ledger-num-scheduler-threads=Runtime.getRuntime().availableProcessors()
+
+# directory used to store extraction NAR file
+pulsar.nar-extraction-directory=System.getProperty("java.io.tmpdir")
+
 ```
 
 You can connect Presto to a Pulsar cluster with multiple hosts. To configure multiple hosts for brokers, add multiple URLs to `pulsar.web-service-url`. To configure multiple hosts for ZooKeeper, add multiple URIs to `pulsar.zookeeper-uri`. The following is an example.
@@ -38,6 +115,21 @@ pulsar.zookeeper-uri=localhost1,localhost2:2181
 
 ```
 
+A frequently asked question is why the latest message does not show up when querying with Pulsar SQL.
+This is not a bug but is controlled by a setting: by default, the BookKeeper LAC (LastAddConfirmed) only advances when subsequent entries are added.
+If no subsequent entries are added, the last entry written is not visible to readers until the ledger is closed.
+This is not a problem for Pulsar, which uses managed ledgers, but Pulsar SQL reads directly from BookKeeper ledgers.
+You can add the following settings to change this behavior:
+In Broker config, set
+bookkeeperExplicitLacIntervalInMills > 0
+bookkeeperUseV2WireProtocol=false
+
+And in Presto config, set
+pulsar.bookkeeper-explicit-interval > 0
+pulsar.bookkeeper-use-v2-protocol=false
+
+However, keep in mind that using the BookKeeper V3 protocol introduces additional GC overhead to BookKeeper because it uses Protobuf.
+
 ## Query data from existing Presto clusters
 
 If you already have a Presto cluster, you can copy the Presto Pulsar connector plugin to your existing cluster. Download the archived plugin package with the following command.
diff --git a/site2/website-next/versioned_docs/version-2.2.1/sql-getting-started.md b/site2/website-next/versioned_docs/version-2.2.1/sql-getting-started.md
index 8a5cd71..6a2d873 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/sql-getting-started.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/sql-getting-started.md
@@ -2,7 +2,6 @@
 id: sql-getting-started
 title: Query data with Pulsar SQL
 sidebar_label: "Query data"
-original_id: sql-getting-started
 ---
 
 Before querying data in Pulsar, you need to install Pulsar and built-in connectors. 
diff --git a/site2/website-next/versioned_docs/version-2.2.1/sql-overview.md b/site2/website-next/versioned_docs/version-2.2.1/sql-overview.md
index 4a4d5f0..2f827f4 100644
--- a/site2/website-next/versioned_docs/version-2.2.1/sql-overview.md
+++ b/site2/website-next/versioned_docs/version-2.2.1/sql-overview.md
@@ -2,7 +2,6 @@
 id: sql-overview
 title: Pulsar SQL Overview
 sidebar_label: "Overview"
-original_id: sql-overview
 ---
 
 Apache Pulsar is used to store streams of event data, and the event data is structured with predefined fields. With the implementation of the [Schema Registry](schema-get-started), you can store structured data in Pulsar and query the data by using [Trino (formerly Presto SQL)](https://trino.io/).
diff --git a/site2/website-next/versioned_docs/version-2.2.1/getting-started-docker.md b/site2/website-next/versioned_docs/version-2.2.1/standalone-docker.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.1/getting-started-docker.md
rename to site2/website-next/versioned_docs/version-2.2.1/standalone-docker.md
diff --git a/site2/website-next/versioned_docs/version-2.2.1/getting-started-standalone.md b/site2/website-next/versioned_docs/version-2.2.1/standalone.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.2.1/getting-started-standalone.md
rename to site2/website-next/versioned_docs/version-2.2.1/standalone.md
diff --git a/site2/website-next/versioned_docs/version-2.3.0/adaptors-spark.md b/site2/website-next/versioned_docs/version-2.3.0/adaptors-spark.md
index e14f13b..afa5a7e 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/adaptors-spark.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/adaptors-spark.md
@@ -2,7 +2,6 @@
 id: adaptors-spark
 title: Pulsar adaptor for Apache Spark
 sidebar_label: "Apache Spark"
-original_id: adaptors-spark
 ---
 
 ## Spark Streaming receiver
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-brokers.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-brokers.md
index dbac453..10a90ca 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-brokers.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-brokers.md
@@ -2,7 +2,6 @@
 id: admin-api-brokers
 title: Managing Brokers
 sidebar_label: "Brokers"
-original_id: admin-api-brokers
 ---
 
 import Tabs from '@theme/Tabs';
@@ -26,9 +25,9 @@ Pulsar brokers consist of two components:
 
 [Brokers](reference-terminology.md#broker) can be managed via:
 
-* The [`brokers`](reference-pulsar-admin.md#brokers) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `brokers` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/brokers` endpoint of the admin {@inject: rest:REST:/} API
-* The `brokers` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin.html} object in the [Java API](client-libraries-java)
+* The `brokers` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 In addition to being configurable when you start them up, brokers can also be [dynamically configured](#dynamic-broker-configuration).
 
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-clusters.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-clusters.md
index 972c7e1..8687ae6 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-clusters.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-clusters.md
@@ -2,7 +2,6 @@
 id: admin-api-clusters
 title: Managing Clusters
 sidebar_label: "Clusters"
-original_id: admin-api-clusters
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -24,9 +23,9 @@ servers (aka [bookies](reference-terminology.md#bookie)), and a [ZooKeeper](http
 
 Clusters can be managed via:
 
-* The [`clusters`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `clusters` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/clusters` endpoint of the admin {@inject: rest:REST:/} API
-* The `clusters` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `clusters` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Clusters resources
 
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-non-persistent-topics.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-non-persistent-topics.md
index 12220de..78dac35 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-non-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-non-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-non-persistent-topics
 title: Managing non-persistent topics
 sidebar_label: "Non-Persistent topics"
-original_id: admin-api-non-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-overview.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-overview.md
index 7936a9c..bd1e1f5 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-overview.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-overview.md
@@ -2,7 +2,6 @@
 id: admin-api-overview
 title: Pulsar admin interface
 sidebar_label: "Overview"
-original_id: admin-api-overview
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-permissions.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-permissions.md
index e2ca469..faedbf1 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-permissions.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-permissions.md
@@ -2,7 +2,6 @@
 id: admin-api-permissions
 title: Managing permissions
 sidebar_label: "Permissions"
-original_id: admin-api-permissions
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-persistent-topics.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-persistent-topics.md
index b6d293b..8a7abae 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-persistent-topics
 title: Managing persistent topics
 sidebar_label: "Persistent topics"
-original_id: admin-api-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-schemas.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-schemas.md
index 9ffe21f..8399a03 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-schemas.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-schemas.md
@@ -2,6 +2,5 @@
 id: admin-api-schemas
 title: Managing Schemas
 sidebar_label: "Schemas"
-original_id: admin-api-schemas
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.3.0/admin-api-tenants.md b/site2/website-next/versioned_docs/version-2.3.0/admin-api-tenants.md
index fe68336..570ac31 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/admin-api-tenants.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/admin-api-tenants.md
@@ -2,7 +2,6 @@
 id: admin-api-tenants
 title: Managing Tenants
 sidebar_label: "Tenants"
-original_id: admin-api-tenants
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -80,22 +79,26 @@ $ pulsar-admin tenants create my-tenant
 
 ```
 
-When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
+When creating a tenant, you can optionally assign admin roles using the `-r`/`--admin-roles`
+flag, and clusters using the `-c`/`--allowed-clusters` flag. You can specify multiple values
+as a comma-separated list. Here are some examples:
 
 ```shell
 
 $ pulsar-admin tenants create my-tenant \
-  --admin-roles role1,role2,role3
+  --admin-roles role1,role2,role3 \
+  --allowed-clusters cluster1
 
 $ pulsar-admin tenants create my-tenant \
   -r role1
+  -c cluster1
 
 ```
 
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
+{@inject: endpoint|PUT|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -140,7 +143,7 @@ $ pulsar-admin tenants get my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|GET|/admin/v2/tenants/:cluster|operation/getTenant?version=@pulsar:version_number@}
+{@inject: endpoint|GET|/admin/v2/tenants/:tenant|operation/getTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -175,7 +178,7 @@ $ pulsar-admin tenants delete my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/deleteTenant?version=@pulsar:version_number@}
+{@inject: endpoint|DELETE|/admin/v2/tenants/:tenant|operation/deleteTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -210,7 +213,7 @@ $ pulsar-admin tenants update my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/updateTenant?version=@pulsar:version_number@}
+{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/updateTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
diff --git a/site2/website-next/versioned_docs/version-2.3.0/administration-dashboard.md b/site2/website-next/versioned_docs/version-2.3.0/administration-dashboard.md
index 514b076..1eb0404 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/administration-dashboard.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/administration-dashboard.md
@@ -7,7 +7,7 @@ original_id: administration-dashboard
 
 :::note
 
-Pulsar dashboard is deprecated. If you want to manage and monitor the stats of your topics, use [Pulsar Manager](administration-pulsar-manager). 
+Pulsar dashboard is deprecated. We recommend you use [Pulsar Manager](administration-pulsar-manager) to manage and monitor the stats of your topics. 
 
 :::
 
@@ -53,17 +53,17 @@ $ docker run -p 80:80 \
 ```
 
  
-You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the ip address or hostname of the machine running Pulsar standalone. The ip address or hostname should be accessible from the docker instance running dashboard.
+You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the IP address or hostname of the machine that runs Pulsar standalone. The IP address or hostname should be accessible from the running dashboard in the docker instance.
 
-Once the Docker container runs, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
+Once the Docker container starts, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
 
-> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container
+> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container.
 
 If the Pulsar service runs in standalone mode in `localhost`, the `SERVICE_URL` has to
-be the IP of the machine.
+be the IP address of the machine.
 
 Similarly, given the Pulsar standalone advertises itself with localhost by default, you need to
-explicitly set the advertise address to the host IP. For example:
+explicitly set the advertise address to the host IP address. For example:
 
 ```shell
 
diff --git a/site2/website-next/versioned_docs/version-2.3.0/administration-geo.md b/site2/website-next/versioned_docs/version-2.3.0/administration-geo.md
index 84367f8..d956817 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/administration-geo.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/administration-geo.md
@@ -2,9 +2,12 @@
 id: administration-geo
 title: Pulsar geo-replication
 sidebar_label: "Geo-replication"
-original_id: administration-geo
 ---
 
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
 *Geo-replication* is the replication of persistently stored message data across multiple clusters of a Pulsar instance.
 
 ## How geo-replication works
@@ -44,8 +47,6 @@ All messages produced in any of the three clusters are delivered to all subscrip
 
 ## Configure replication
 
-As stated in [Geo-replication and Pulsar properties](#geo-replication-and-pulsar-properties) section, geo-replication in Pulsar is managed at the [tenant](reference-terminology.md#tenant) level.
-
 The following example connects three clusters: **us-east**, **us-west**, and **us-cent**.
 
 ### Connect replication clusters
@@ -107,7 +108,11 @@ $ bin/pulsar-admin tenants create my-tenant \
 
 To update permissions of an existing tenant, use `update` instead of `create`.
 
-### Enable geo-replication namespaces
+### Enable geo-replication 
+
+You can enable geo-replication at **namespace** or **topic** level.
+
+#### Enable geo-replication at namespace level
 
 You can create a namespace with the following command sample.
 
@@ -126,11 +131,24 @@ $ bin/pulsar-admin namespaces set-clusters my-tenant/my-namespace \
 
 ```
 
-You can change the replication clusters for a namespace at any time, without disruption to ongoing traffic. Replication channels are immediately set up or stopped in all clusters as soon as the configuration changes.
+#### Enable geo-replication at topic level
 
-### Use topics with geo-replication
+You can set geo-replication at topic level using the command `pulsar-admin topics set-replication-clusters`. For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+
+```shell
+
+$ bin/pulsar-admin topics set-replication-clusters --clusters us-west,us-east,us-cent my-tenant/my-namespace/my-topic
+
+```
 
-Once you create a geo-replication namespace, any topics that producers or consumers create within that namespace is replicated across clusters. Typically, each application uses the `serviceUrl` for the local cluster.
+:::tip
+
+- You can change the replication clusters for a namespace at any time, without disruption to ongoing traffic. Replication channels are immediately set up or stopped in all clusters as soon as the configuration changes.
+- Once you create a geo-replication namespace, any topics that producers or consumers create within that namespace are replicated across clusters. Typically, each application uses the `serviceUrl` for the local cluster.
+
+:::
+
+### Use topics with geo-replication
 
 #### Selective replication
 
@@ -158,14 +176,30 @@ producer.newMessage()
 
 #### Topic stats
 
-Topic-specific statistics for geo-replication topics are available via the [`pulsar-admin`](reference-pulsar-admin) tool and {@inject: rest:REST:/} API:
+You can check topic-specific statistics for geo-replication topics using one of the following methods.
+
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"}]}>
+<TabItem value="pulsar-admin">
+
+Use the [`pulsar-admin topics stats`](https://pulsar.apache.org/tools/pulsar-admin/) command.
 
 ```shell
 
-$ bin/pulsar-admin persistent stats persistent://my-tenant/my-namespace/my-topic
+$ bin/pulsar-admin topics stats persistent://my-tenant/my-namespace/my-topic
 
 ```
 
+</TabItem>
+<TabItem value="REST API">
+
+{@inject: endpoint|GET|/admin/v2/:schema/:tenant/:namespace/:topic/stats|operation/getStats?version=@pulsar:version_number@}
+
+</TabItem>
+
+</Tabs>
+
 Each cluster reports its own local stats, including the incoming and outgoing replication rates and backlogs.
 
 #### Delete a geo-replication topic
@@ -211,4 +245,5 @@ Consumer<String> consumer = client.newConsumer(Schema.STRING)
 
 ### Limitations
 
-When you enable replicated subscription, you're creating a consistent distributed snapshot to establish an association between message ids from different clusters. The snapshots are taken periodically. The default value is `1 second`. It means that a consumer failing over to a different cluster can potentially receive 1 second of duplicates. You can also configure the frequency of the snapshot in the `broker.conf` file.
+* When you enable replicated subscription, you're creating a consistent distributed snapshot to establish an association between message ids from different clusters. The snapshots are taken periodically. The default value is `1 second`. It means that a consumer failing over to a different cluster can potentially receive 1 second of duplicates. You can also configure the frequency of the snapshot in the `broker.conf` file.
+* Only the base line cursor position is synced in replicated subscriptions while the individual acknowledgments are not synced. This means the messages acknowledged out-of-order could end up getting delivered again, in the case of a cluster failover.
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.3.0/administration-stats.md b/site2/website-next/versioned_docs/version-2.3.0/administration-stats.md
index ac0c036..2ccd73c 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/administration-stats.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/administration-stats.md
@@ -2,7 +2,6 @@
 id: administration-stats
 title: Pulsar stats
 sidebar_label: "Pulsar statistics"
-original_id: administration-stats
 ---
 
 ## Partitioned topics
diff --git a/site2/website-next/versioned_docs/version-2.3.0/client-libraries-java.md b/site2/website-next/versioned_docs/version-2.3.0/client-libraries-java.md
index 28504f8..b8150e1 100644
--- a/site2/website-next/versioned_docs/version-2.3.0/client-libraries-java.md
+++ b/site2/website-next/versioned_docs/version-2.3.0/client-libraries-java.md
@@ -2,10 +2,9 @@
 id: client-libraries-java
 title: Pulsar Java client
 sidebar_label: "Java"
-original_id: client-libraries-java
 ---
 
-You can use Pulsar Java client to create Java [producer](#producer), [consumer](#consumer), and [readers](#reader-interface) of messages and to perform [administrative tasks](admin-api-overview). The current version of the Java client is **@pulsar:version@**.
+You can use a Pulsar Java client to create the Java [producer](#producer), [consumer](#consumer), and [readers](#reader) of messages and to perform [administrative tasks](admin-api-overview). The current Java client version is **@pulsar:version@**.
 
 All the methods in [producer](#producer), [consumer](#consumer), and [reader](#reader) of a Java client are thread-safe.
 
@@ -15,7 +14,7 @@ Package | Description | Maven Artifact
 :-------|:------------|:--------------
 [`org.apache.pulsar.client.api`](/api/client) | The producer and consumer API | [org.apache.pulsar:pulsar-client:@pulsar:version@](http://search.maven.org/#artifactdetails%7Corg.apache.pulsar%7Cpulsar-client%7C@pulsar:version@%7Cjar)
 [`org.apache.pulsar.client.admin`](/api/admin) | The Java [admin API](admin-api-overview) | [org.apache.pulsar:pulsar-client-admin:@pulsar:version@](http://search.maven.org/#artifactdetails%7Corg.apache.pulsar%7Cpulsar-client-admin%7C@pulsar:version@%7Cjar)
-`org.apache.pulsar.client.all` |Includes both `pulsar-client` and `pulsar-client-admin`<br /><br /> Both `pulsar-client` and `pulsar-client-admin` are shaded packages and they shade dependencies independently. Consequently, the applications using both `pulsar-client` and `pulsar-client-admin` have redundant shaded classes. It would be troublesome if you introduce new dependencies but forget to update shading rules. <br /><br /> In this case, you can use `pulsar-client-all`, which shades  [...]
+`org.apache.pulsar.client.all` |Include both `pulsar-client` and `pulsar-client-admin`<br /> Both `pulsar-client` and `pulsar-client-admin` are shaded packages and they shade dependencies independently. Consequently, the applications using both `pulsar-client` and `pulsar-client-admin` have redundant shaded classes. It would be troublesome if you introduce new dependencies but forget to update shading rules. <br /> In this case, you can use `pulsar-client-all`, which shades dependencies  [...]
 
 This document focuses only on the client API for producing and consuming messages on Pulsar topics. For how to use the Java admin client, see [Pulsar admin interface](admin-api-overview).
 
@@ -118,35 +117,56 @@ PulsarClient client = PulsarClient.builder()
 
 If you create a client, you can use the `loadConf` configuration. The following parameters are available in `loadConf`.
 
-| Type | Name | <div>Description</div> | Default
+| Name | Type |  <div>Description</div> | Default
 |---|---|---|---
-String | `serviceUrl` |Service URL provider for Pulsar service | None
-String | `authPluginClassName` | Name of the authentication plugin | None
-String | `authParams` | String represents parameters for the authentication plugin <br /><br />**Example**<br /> key1:val1,key2:val2|None
-long|`operationTimeoutMs`|Operation timeout |30000
-long|`statsIntervalSeconds`|Interval between each stats info<br /><br />Stats is activated with positive `statsInterval`<br /><br />Set `statsIntervalSeconds` to 1 second at least |60
-int|`numIoThreads`| The number of threads used for handling connections to brokers | 1 
-int|`numListenerThreads`|The number of threads used for handling message listeners. The listener thread pool is shared across all the consumers and readers using the "listener" model to get messages. For a given consumer, the listener is always invoked from the same thread to ensure ordering. If you want multiple threads to process a single topic, you need to create a [`shared`](https://pulsar.apache.org/docs/en/next/concepts-messaging/#shared) subscription and multiple consumers for thi [...]
-boolean|`useTcpNoDelay`|Whether to use TCP no-delay flag on the connection to disable Nagle algorithm |true
-boolean |`useTls` |Whether to use TLS encryption on the connection| false
-string | `tlsTrustCertsFilePath` |Path to the trusted TLS certificate file|None
-boolean|`tlsAllowInsecureConnection`|Whether the Pulsar client accepts untrusted TLS certificate from broker | false
-boolean | `tlsHostnameVerificationEnable` | Whether to enable TLS hostname verification|false
-int|`concurrentLookupRequest`|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000
-int|`maxLookupRequest`|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000
-int|`maxNumberOfRejectedRequestPerConnection`|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50
-int|`keepAliveIntervalSeconds`|Seconds of keeping alive interval for each client broker connection|30
-int|`connectionTimeoutMs`|Duration of waiting for a connection to a broker to be established <br /><br />If the duration passes without a response from a broker, the connection attempt is dropped|10000
-int|`requestTimeoutMs`|Maximum duration for completing a request |60000
-int|`defaultBackoffIntervalNanos`| Default duration for a backoff interval | TimeUnit.MILLISECONDS.toNanos(100);
-long|`maxBackoffIntervalNanos`|Maximum duration for a backoff interval|TimeUnit.SECONDS.toNanos(30)
-SocketAddress|`socks5ProxyAddress`|SOCKS5 proxy address | None
-String|`socks5ProxyUsername`|SOCKS5 proxy username | None
-String|`socks5ProxyPassword`|SOCKS5 proxy password | None
+`serviceUrl` | String | Service URL provider for Pulsar service | None
+`authPluginClassName` | String | Name of the authentication plugin | None
+ `authParams` | String | Parameters for the authentication plugin <br /><br />**Example**<br /> key1:val1,key2:val2|None
+`operationTimeoutMs`|long|Operation timeout |30000
+`statsIntervalSeconds`|long|Interval between each stats update<br /><br />Stats is activated with a positive `statsInterval`<br /><br />Set `statsIntervalSeconds` to at least 1 second. |60
+`numIoThreads`| int| The number of threads used for handling connections to brokers | 1 
+`numListenerThreads`|int|The number of threads used for handling message listeners. The listener thread pool is shared across all the consumers and readers using the "listener" model to get messages. For a given consumer, the listener is always invoked from the same thread to ensure ordering. If you want multiple threads to process a single topic, you need to create a [`shared`](https://pulsar.apache.org/docs/en/next/concepts-messaging/#shared) subscription and multiple consumers for thi [...]
+`useTcpNoDelay`| boolean| Whether to use TCP no-delay flag on the connection to disable Nagle algorithm |true
+`useTls` |boolean |Whether to use TLS encryption on the connection| false
+ `tlsTrustCertsFilePath` |string |Path to the trusted TLS certificate file|None
+`tlsAllowInsecureConnection`|boolean|Whether the Pulsar client accepts untrusted TLS certificate from broker | false
+`tlsHostnameVerificationEnable` |boolean |  Whether to enable TLS hostname verification|false
+`concurrentLookupRequest`|int|The number of concurrent lookup requests allowed to send on each broker connection to prevent overload on broker|5000
+`maxLookupRequest`|int|The maximum number of lookup requests allowed on each broker connection to prevent overload on broker | 50000
+`maxNumberOfRejectedRequestPerConnection`|int|The maximum number of rejected requests of a broker in a certain time frame (30 seconds) after the current connection is closed and the client creates a new connection to connect to a different broker|50
+`keepAliveIntervalSeconds`|int|Seconds of keeping alive interval for each client broker connection|30
+`connectionTimeoutMs`|int|Duration of waiting for a connection to a broker to be established <br /><br />If the duration passes without a response from a broker, the connection attempt is dropped|10000
+`requestTimeoutMs`|int|Maximum duration for completing a request |60000
+`defaultBackoffIntervalNanos`|int| Default duration for a backoff interval | TimeUnit.MILLISECONDS.toNanos(100);
+`maxBackoffIntervalNanos`|long|Maximum duration for a backoff interval|TimeUnit.SECONDS.toNanos(30)
+`socks5ProxyAddress`|SocketAddress|SOCKS5 proxy address | None
+`socks5ProxyUsername`|string|SOCKS5 proxy username | None
+`socks5ProxyPassword`|string|SOCKS5 proxy password | None
 
 Check out the Javadoc for the {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} class for a full list of configurable parameters.
 
-> In addition to client-level configuration, you can also apply [producer](#configuring-producers) and [consumer](#configuring-consumers) specific configuration as described in sections below.
+> In addition to client-level configuration, you can also apply [producer](#configure-producer) and [consumer](#configure-consumer) specific configuration as described in sections below.
+
+### Client memory allocator configuration
+You can set the client memory allocator configurations through Java properties.<br />
+
+| Property | Type |  <div>Description</div> | Default | Available values
+|---|---|---|---|---
+`pulsar.allocator.pooled` | String | If set to `true`, the client uses a direct memory pool. <br /> If set to `false`, the client uses heap memory without a pool | true | <li> true </li> <li> false </li> 
+`pulsar.allocator.exit_on_oom` | String | Whether to exit the JVM when OOM happens | false |  <li> true </li> <li> false </li>
+`pulsar.allocator.leak_detection` | String | Memory leak detection policy for the client memory allocator | Disabled | <li> Disabled </li> <li> Simple </li> <li> Advanced </li> <li> Paranoid </li>
+`pulsar.allocator.out_of_memory_policy` | String | When an OOM occurs, the client either throws an exception or falls back to heap memory | FallbackToHeap | <li> ThrowException </li> <li> FallbackToHeap </li>
+
+**Example**:
+
+```
+
+-Dpulsar.allocator.pooled=true
+-Dpulsar.allocator.exit_on_oom=false
+-Dpulsar.allocator.leak_detection=Disabled
+-Dpulsar.allocator.out_of_memory_policy=ThrowException
+
+```
 
 ## Producer
 
@@ -163,7 +183,7 @@ producer.send("My message".getBytes());
 
 ```
 
-By default, producers produce messages that consist of byte arrays. You can produce different types by specifying a message [schema](#schemas).
+By default, producers produce messages that consist of byte arrays. You can produce different types by specifying a message [schema](#schema).
 
 ```java
 
@@ -203,25 +223,25 @@ stringProducer.send("My message");
 
 ### Configure producer
 
-If you instantiate a `Producer` object by specifying only a topic name as the example above, use the default configuration for producer. 
+If you instantiate a `Producer` object by specifying only a topic name as in the example above, the default producer configuration is used.
 
 If you create a producer, you can use the `loadConf` configuration. The following parameters are available in `loadConf`.
 
-Type | Name| <div>Description</div>|  Default
+Name| Type |  <div>Description</div>|  Default
 |---|---|---|---
-String|	`topicName`|	Topic name| null|
-String|`producerName`|Producer name| null
-long|`sendTimeoutMs`|Message send timeout in ms.<br /><br />If a message is not acknowledged by a server before the `sendTimeout` expires, an error occurs.|30000
-boolean|`blockIfQueueFull`|If it is set to `true`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer block, rather than failing and throwing errors. <br /><br />If it is set to `false`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer fail and `ProducerQueueIsFullError` exceptions occur.<br /><br />The `MaxPendingMessages` parameter determines the size of the outgoing message queue.|false
-int|`maxPendingMessages`|The maximum size of a queue holding pending messages.<br /><br />For example, a message waiting to receive an acknowledgment from a [broker](reference-terminology.md#broker). <br /><br />By default, when the queue is full, all calls to the `Send` and `SendAsync` methods fail **unless** you set `BlockIfQueueFull` to `true`.|1000
-int|`maxPendingMessagesAcrossPartitions`|The maximum number of pending messages across partitions. <br /><br />Use the setting to lower the max pending messages for each partition ({@link #setMaxPendingMessages(int)}) if the total number exceeds the configured value.|50000
-MessageRoutingMode|`messageRoutingMode`|Message routing logic for producers on [partitioned topics](concepts-architecture-overview.md#partitioned-topics).<br /><br /> Apply the logic only when setting no key on messages. <br /><br />Available options are as follows: <br /><br /><li>`pulsar.RoundRobinDistribution`: round robin<br /><br /> </li><li>`pulsar.UseSinglePartition`: publish all messages to a single partition<br /><br /></li><li>`pulsar.CustomPartition`: a custom partitioning sch [...]
-HashingScheme|`hashingScheme`|Hashing function determining the partition where you publish a particular message (**partitioned topics only**).<br /><br />Available options are as follows:<br /><br /><li> `pulsar.JavaStringHash`: the equivalent of `String.hashCode()` in Java<br /><br /></li><li> `pulsar.Murmur3_32Hash`: applies the [Murmur3](https://en.wikipedia.org/wiki/MurmurHash) hashing function<br /><br /></li><li>`pulsar.BoostHash`: applies the hashing function from C++'s [Boost](ht [...]
-ProducerCryptoFailureAction|`cryptoFailureAction`|Producer should take action when encryption fails.<br /><br /><li>**FAIL**: if encryption fails, unencrypted messages fail to send.</li><br /><li> **SEND**: if encryption fails, unencrypted messages are sent. </li>|`ProducerCryptoFailureAction.FAIL`
-long|`batchingMaxPublishDelayMicros`|Batching time period of sending messages.|TimeUnit.MILLISECONDS.toMicros(1)
-int|batchingMaxMessages|The maximum number of messages permitted in a batch.|1000
-boolean|`batchingEnabled`|Enable batching of messages. |true
-CompressionType|`compressionType`|Message data compression type used by a producer. <br /><br />Available options:<li>[`LZ4`](https://github.com/lz4/lz4)<br /></li><li>[`ZLIB`](https://zlib.net/)<br /></li><li>[`ZSTD`](https://facebook.github.io/zstd/)<br /></li><li>[`SNAPPY`](https://google.github.io/snappy/)</li>| No compression
+`topicName`| string|		Topic name| null|
+`producerName`| string|Producer name| null
+`sendTimeoutMs`| long|Message send timeout in ms.<br />If a message is not acknowledged by a server before the `sendTimeout` expires, an error occurs.|30000
+`blockIfQueueFull`|boolean|If it is set to `true`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer block, rather than failing and throwing errors. <br />If it is set to `false`, when the outgoing message queue is full, the `Send` and `SendAsync` methods of producer fail and `ProducerQueueIsFullError` exceptions occur.<br /><br />The `MaxPendingMessages` parameter determines the size of the outgoing message queue.|false
+`maxPendingMessages`| int|The maximum size of a queue holding pending messages.<br /><br />For example, a message waiting to receive an acknowledgment from a [broker](reference-terminology.md#broker). <br /><br />By default, when the queue is full, all calls to the `Send` and `SendAsync` methods fail **unless** you set `BlockIfQueueFull` to `true`.|1000
+`maxPendingMessagesAcrossPartitions`|int|The maximum number of pending messages across partitions. <br /><br />Use the setting to lower the max pending messages for each partition ({@link #setMaxPendingMessages(int)}) if the total number exceeds the configured value.|50000
+`messageRoutingMode`| MessageRoutingMode|Message routing logic for producers on [partitioned topics](concepts-architecture-overview.md#partitioned-topics).<br /> Apply the logic only when setting no key on messages. <br />Available options are as follows: <br /><li>`pulsar.RoundRobinDistribution`: round robin</li><li>`pulsar.UseSinglePartition`: publish all messages to a single partition</li><li>`pulsar.CustomPartition`: a custom partitioning scheme</li>|<li>`pulsar.RoundRobinDistribution`</li>
+`hashingScheme`| HashingScheme|Hashing function determining the partition where you publish a particular message (**partitioned topics only**).<br />Available options are as follows:<br /><li> `pulsar.JavaStringHash`: the equivalent of `String.hashCode()` in Java</li><li> `pulsar.Murmur3_32Hash`: applies the [Murmur3](https://en.wikipedia.org/wiki/MurmurHash) hashing function</li><li>`pulsar.BoostHash`: applies the hashing function from C++'s [Boost](https://www.boost.org/doc/libs/1_62_0 [...]
+`cryptoFailureAction`| ProducerCryptoFailureAction|Producer should take action when encryption fails.<br /><li>**FAIL**: if encryption fails, unencrypted messages fail to send.</li><li> **SEND**: if encryption fails, unencrypted messages are sent.</li> |`ProducerCryptoFailureAction.FAIL`
+`batchingMaxPublishDelayMicros`| long|Batching time period of sending messages.|TimeUnit.MILLISECONDS.toMicros(1)
+`batchingMaxMessages` |int|The maximum number of messages permitted in a batch.|1000
+`batchingEnabled`| boolean|Enable batching of messages. |true
+`compressionType`|CompressionType|Message data compression type used by a producer. <br />Available options:<li>[`LZ4`](https://github.com/lz4/lz4)</li><li>[`ZLIB`](https://zlib.net/)<br /></li><li>[`ZSTD`](https://facebook.github.io/zstd/)</li><li>[`SNAPPY`](https://google.github.io/snappy/)</li>| No compression
 
 You can configure parameters if you do not want to use the default configuration.
 
@@ -240,7 +260,7 @@ Producer<byte[]> producer = client.newProducer()
 
 ### Message routing
 
-When using partitioned topics, you can specify the routing mode whenever you publish messages using a producer. For more information on specifying a routing mode using the Java client, see the [Partitioned Topics](cookbooks-partitioned) cookbook.
+When using partitioned topics, you can specify the routing mode whenever you publish messages using a producer. For more information on specifying a routing mode using the Java client, see the [Partitioned Topics cookbook](cookbooks-partitioned).
 
 ### Async send
 
@@ -279,7 +299,7 @@ You can terminate the builder chain with `sendAsync()` and get a future return.
 
 In Pulsar, consumers subscribe to topics and handle messages that producers publish to those topics. You can instantiate a new [consumer](reference-terminology.md#consumer) by first instantiating a {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} object and passing it a URL for a Pulsar broker (as [above](#client-configuration)).
 
-Once you've instantiated a {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} object, you can create a {@inject: javadoc:Consumer:/client/org/apache/pulsar/client/api/Consumer} by specifying a [topic](reference-terminology.md#topic) and a [subscription](concepts-messaging.md#subscription-modes).
+Once you've instantiated a {@inject: javadoc:PulsarClient:/client/org/apache/pulsar/client/api/PulsarClient} object, you can create a {@inject: javadoc:Consumer:/client/org/apache/pulsar/client/api/Consumer} by specifying a [topic](reference-terminology.md#topic) and a [subscription](concepts-messaging.md#subscription-types).
 
 ```java
 
@@ -339,29 +359,30 @@ If you instantiate a `Consumer` object by specifying only a topic and subscripti
 
... 12165 lines suppressed ...

[pulsar-site] 01/04: update 2.4.x

Posted by ur...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

urfree pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/pulsar-site.git

commit 2a696df3eb288290c68c1a97c75d671a4eb80ec5
Author: LiLi <ur...@apache.org>
AuthorDate: Thu Feb 17 16:10:16 2022 +0800

    update 2.4.x
    
    Signed-off-by: LiLi <ur...@apache.org>
---
 site2/website-next/migrate/migrate-chapter.js      |  15 +-
 site2/website-next/migrate/migrate-docs.js         |   1 +
 .../versioned_docs/version-2.4.0/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.4.0/adaptors-storm.md |   1 -
 .../version-2.4.0/admin-api-brokers.md             |   5 +-
 .../version-2.4.0/admin-api-clusters.md            |   7 +-
 .../version-2.4.0/admin-api-namespaces.md          | 124 +++---
 .../admin-api-non-persistent-topics.md             |   1 -
 .../version-2.4.0/admin-api-partitioned-topics.md  |   1 -
 .../version-2.4.0/admin-api-permissions.md         |   3 +-
 .../version-2.4.0/admin-api-persistent-topics.md   |   1 -
 .../version-2.4.0/admin-api-schemas.md             |   1 -
 .../version-2.4.0/admin-api-tenants.md             |  19 +-
 .../version-2.4.0/administration-dashboard.md      |  12 +-
 .../version-2.4.0/administration-load-balance.md   |  43 +-
 .../version-2.4.0/administration-proxy.md          |   3 +-
 .../version-2.4.0/administration-stats.md          |   1 -
 .../version-2.4.0/client-libraries-cpp.md          | 435 +++++++++++++++++----
 .../version-2.4.0/client-libraries-websocket.md    |  82 +++-
 .../version-2.4.0/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.4.0/concepts-authentication.md       |   1 -
 .../version-2.4.0/concepts-clients.md              |   1 -
 .../version-2.4.0/concepts-multi-tenancy.md        |   1 -
 .../version-2.4.0/concepts-overview.md             |   1 -
 .../version-2.4.0/concepts-replication.md          |   1 -
 .../version-2.4.0/concepts-topic-compaction.md     |   1 -
 .../version-2.4.0/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.4.0/cookbooks-deduplication.md       |   3 +-
 .../version-2.4.0/cookbooks-encryption.md          |   3 +-
 .../version-2.4.0/cookbooks-message-queue.md       |   1 -
 .../version-2.4.0/cookbooks-non-persistent.md      |   1 -
 .../version-2.4.0/cookbooks-partitioned.md         |   1 -
 .../version-2.4.0/cookbooks-retention-expiry.md    | 128 ++++--
 .../versioned_docs/version-2.4.0/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 ++---
 .../versioned_docs/version-2.4.0/deploy-dcos.md    |  56 +--
 .../version-2.4.0/deploy-kubernetes.md             |   1 -
 .../version-2.4.0/deploy-monitoring.md             |  15 +-
 ...nary-protocol.md => develop-binary-protocol.md} |   0
 .../{developing-cpp.md => develop-cpp.md}          |   0
 .../version-2.4.0/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.4.0/develop-tools.md  |   1 -
 .../version-2.4.0/functions-deploying.md           | 256 ------------
 .../version-2.4.0/functions-guarantees.md          |  42 --
 .../version-2.4.0/functions-metrics.md             |   1 -
 .../versioned_docs/version-2.4.0/io-cdc.md         |   1 -
 .../versioned_docs/version-2.4.0/io-connectors.md  |  18 +-
 .../versioned_docs/version-2.4.0/io-develop.md     |   1 -
 .../versioned_docs/version-2.4.0/io-overview.md    |   1 -
 .../versioned_docs/version-2.4.0/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.4.0/reference-cli-tools.md           |  12 +-
 .../version-2.4.0/reference-terminology.md         |   1 -
 .../version-2.4.0/security-athenz.md               |   1 -
 .../version-2.4.0/security-authorization.md        |   1 -
 .../version-2.4.0/security-encryption.md           |   1 -
 .../version-2.4.0/security-extending.md            |   1 -
 .../version-2.4.0/sql-deployment-configurations.md |  94 ++++-
 .../versioned_docs/version-2.4.0/sql-overview.md   |   1 -
 .../version-2.4.0/standalone-docker.md             | 150 ++++---
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.4.1/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.4.1/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.4.1/adaptors-storm.md |   1 -
 .../version-2.4.1/admin-api-brokers.md             |   5 +-
 .../version-2.4.1/admin-api-clusters.md            |   7 +-
 .../version-2.4.1/admin-api-namespaces.md          | 124 +++---
 .../version-2.4.1/admin-api-permissions.md         |   3 +-
 .../version-2.4.1/admin-api-persistent-topics.md   |   1 -
 .../version-2.4.1/admin-api-schemas.md             |   1 -
 .../version-2.4.1/admin-api-tenants.md             |  19 +-
 .../version-2.4.1/administration-load-balance.md   |  43 +-
 .../version-2.4.1/administration-proxy.md          |   3 +-
 .../version-2.4.1/administration-stats.md          |   1 -
 .../version-2.4.1/administration-zk-bk.md          |   1 -
 .../version-2.4.1/client-libraries-cpp.md          | 435 +++++++++++++++++----
 .../version-2.4.1/client-libraries-go.md           | 162 +++++++-
 .../version-2.4.1/client-libraries-websocket.md    |  82 +++-
 .../version-2.4.1/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.4.1/concepts-authentication.md       |   1 -
 .../version-2.4.1/concepts-clients.md              |   1 -
 .../version-2.4.1/concepts-multi-tenancy.md        |   1 -
 .../version-2.4.1/concepts-overview.md             |   1 -
 .../version-2.4.1/concepts-replication.md          |   1 -
 .../version-2.4.1/concepts-topic-compaction.md     |   1 -
 .../version-2.4.1/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.4.1/cookbooks-deduplication.md       |   3 +-
 .../version-2.4.1/cookbooks-encryption.md          |   3 +-
 .../version-2.4.1/cookbooks-message-queue.md       |   1 -
 .../version-2.4.1/cookbooks-non-persistent.md      |   1 -
 .../version-2.4.1/cookbooks-partitioned.md         |   1 -
 .../version-2.4.1/cookbooks-retention-expiry.md    | 128 ++++--
 .../version-2.4.1/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.4.1/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 ++---
 .../deploy-bare-metal.md}                          |   0
 .../versioned_docs/version-2.4.1/deploy-dcos.md    |  56 +--
 .../version-2.4.1/deploy-kubernetes.md             |   1 -
 .../version-2.4.1/deploy-monitoring.md             |  15 +-
 .../version-2.4.1/develop-binary-protocol.md       | 105 +++--
 .../version-2.4.1/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.4.1/develop-tools.md  |   1 -
 .../versioned_docs/version-2.4.1/io-cdc.md         |   1 -
 .../versioned_docs/version-2.4.1/io-overview.md    |   1 -
 .../versioned_docs/version-2.4.1/io-quickstart.md  |   2 +-
 .../versioned_docs/version-2.4.1/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.4.1/reference-cli-tools.md           | 172 +++++---
 .../version-2.4.1/reference-terminology.md         |   1 -
 .../version-2.4.1/security-encryption.md           |   1 -
 .../version-2.4.1/security-extending.md            |   1 -
 .../versioned_docs/version-2.4.1/security-jwt.md   |   1 -
 .../version-2.4.1/security-token-admin.md          |   1 -
 .../version-2.4.1/sql-deployment-configurations.md |  94 ++++-
 .../version-2.4.1/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.4.1/sql-overview.md   |   1 -
 .../version-2.4.1/standalone-docker.md             | 150 ++++---
 ...getting-started-standalone.md => standalone.md} |   0
 .../versioned_docs/version-2.4.2/adaptors-kafka.md |   3 +-
 .../versioned_docs/version-2.4.2/adaptors-spark.md |   1 -
 .../versioned_docs/version-2.4.2/adaptors-storm.md |   1 -
 .../version-2.4.2/admin-api-brokers.md             |   5 +-
 .../version-2.4.2/admin-api-clusters.md            |   7 +-
 .../version-2.4.2/admin-api-namespaces.md          | 124 +++---
 .../version-2.4.2/admin-api-permissions.md         |   3 +-
 .../version-2.4.2/admin-api-persistent-topics.md   |   1 -
 .../version-2.4.2/admin-api-schemas.md             |   1 -
 .../version-2.4.2/admin-api-tenants.md             |  19 +-
 .../version-2.4.2/administration-geo.md            |   2 +-
 .../version-2.4.2/administration-load-balance.md   |  43 +-
 .../version-2.4.2/administration-proxy.md          |   3 +-
 .../version-2.4.2/administration-stats.md          |   1 -
 .../version-2.4.2/administration-zk-bk.md          |   1 -
 .../version-2.4.2/client-libraries-cpp.md          | 435 +++++++++++++++++----
 .../version-2.4.2/client-libraries-go.md           | 162 +++++++-
 .../version-2.4.2/client-libraries-websocket.md    |  82 +++-
 .../version-2.4.2/client-libraries.md              |   4 +-
 .../concepts-architecture-overview.md              |   3 +-
 .../version-2.4.2/concepts-authentication.md       |   1 -
 .../version-2.4.2/concepts-clients.md              |   1 -
 .../version-2.4.2/concepts-multi-tenancy.md        |   1 -
 .../version-2.4.2/concepts-overview.md             |   1 -
 .../version-2.4.2/concepts-replication.md          |   1 -
 .../version-2.4.2/concepts-topic-compaction.md     |   1 -
 .../version-2.4.2/cookbooks-bookkeepermetadata.md  |   1 -
 .../version-2.4.2/cookbooks-deduplication.md       |   3 +-
 .../version-2.4.2/cookbooks-encryption.md          |   3 +-
 .../version-2.4.2/cookbooks-message-queue.md       |   1 -
 .../version-2.4.2/cookbooks-non-persistent.md      |   1 -
 .../version-2.4.2/cookbooks-partitioned.md         |   1 -
 .../version-2.4.2/cookbooks-retention-expiry.md    | 128 ++++--
 .../version-2.4.2/cookbooks-tiered-storage.md      |   7 +-
 .../versioned_docs/version-2.4.2/deploy-aws.md     |   3 +-
 .../deploy-bare-metal-multi-cluster.md             | 101 ++---
 .../deploy-bare-metal.md}                          |   0
 .../versioned_docs/version-2.4.2/deploy-dcos.md    |  56 +--
 .../version-2.4.2/deploy-kubernetes.md             |   1 -
 .../version-2.4.2/deploy-monitoring.md             |  15 +-
 .../version-2.4.2/develop-binary-protocol.md       | 105 +++--
 .../version-2.4.2/develop-load-manager.md          |   1 -
 .../versioned_docs/version-2.4.2/develop-tools.md  |   1 -
 .../versioned_docs/version-2.4.2/io-cdc.md         |   1 -
 .../versioned_docs/version-2.4.2/io-overview.md    |   1 -
 .../versioned_docs/version-2.4.2/pulsar-2.0.md     |   1 -
 .../{reference-pulsar-admin.md => pulsar-admin.md} |   0
 .../version-2.4.2/reference-cli-tools.md           | 172 +++++---
 .../version-2.4.2/reference-terminology.md         |   1 -
 .../version-2.4.2/security-encryption.md           |   1 -
 .../version-2.4.2/security-extending.md            |   1 -
 .../versioned_docs/version-2.4.2/security-jwt.md   |   1 -
 .../version-2.4.2/security-token-admin.md          |   1 -
 .../version-2.4.2/sql-deployment-configurations.md |  94 ++++-
 .../version-2.4.2/sql-getting-started.md           |   1 -
 .../versioned_docs/version-2.4.2/sql-overview.md   |   1 -
 .../version-2.4.2/standalone-docker.md             | 150 ++++---
 ...getting-started-standalone.md => standalone.md} |   0
 site2/website-next/versions.json                   |   2 +-
 179 files changed, 3363 insertions(+), 1829 deletions(-)

diff --git a/site2/website-next/migrate/migrate-chapter.js b/site2/website-next/migrate/migrate-chapter.js
index 102629c..5f042fb 100644
--- a/site2/website-next/migrate/migrate-chapter.js
+++ b/site2/website-next/migrate/migrate-chapter.js
@@ -43,6 +43,16 @@ const migrate = (version, category, cb) => {
     return;
   }
 
+  console.log("     [" + version + ":" + category + "]migrate...");
+  let existsSidebar = [];
+  for (let docsId of sidebar) {
+    let mdpath = migrateDocs(version, category, docsId, cb);
+    if (mdpath) {
+      existsSidebar.push(docsId);
+    }
+  }
+  sidebar = existsSidebar;
+
   let new_sidebar_file = "";
   let new_sidebar = {};
 
@@ -117,11 +127,6 @@ const migrate = (version, category, cb) => {
     }
   }
   fs.writeFileSync(new_sidebar_file, JSON.stringify(new_sidebar, null, 2));
-
-  console.log("     [" + version + ":" + category + "]migrate...");
-  for (let docsId of sidebar) {
-    migrateDocs(version, category, docsId, cb);
-  }
 };
 
 module.exports = migrate;
diff --git a/site2/website-next/migrate/migrate-docs.js b/site2/website-next/migrate/migrate-docs.js
index e483c93..31f09a1 100644
--- a/site2/website-next/migrate/migrate-docs.js
+++ b/site2/website-next/migrate/migrate-docs.js
@@ -42,6 +42,7 @@ function migrate(version, chapter, docsId, cb) {
         " was not fund, skip..."
     );
   }
+  return mdpath;
 }
 
 module.exports = migrate;
diff --git a/site2/website-next/versioned_docs/version-2.4.0/adaptors-spark.md b/site2/website-next/versioned_docs/version-2.4.0/adaptors-spark.md
index e14f13b..afa5a7e 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/adaptors-spark.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/adaptors-spark.md
@@ -2,7 +2,6 @@
 id: adaptors-spark
 title: Pulsar adaptor for Apache Spark
 sidebar_label: "Apache Spark"
-original_id: adaptors-spark
 ---
 
 ## Spark Streaming receiver
diff --git a/site2/website-next/versioned_docs/version-2.4.0/adaptors-storm.md b/site2/website-next/versioned_docs/version-2.4.0/adaptors-storm.md
index 76d5071..9df9076 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/adaptors-storm.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/adaptors-storm.md
@@ -2,7 +2,6 @@
 id: adaptors-storm
 title: Pulsar adaptor for Apache Storm
 sidebar_label: "Apache Storm"
-original_id: adaptors-storm
 ---
 
 Pulsar Storm is an adaptor for integrating with [Apache Storm](http://storm.apache.org/) topologies. It provides core Storm implementations for sending and receiving data.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-brokers.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-brokers.md
index dbac453..10a90ca 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-brokers.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-brokers.md
@@ -2,7 +2,6 @@
 id: admin-api-brokers
 title: Managing Brokers
 sidebar_label: "Brokers"
-original_id: admin-api-brokers
 ---
 
 import Tabs from '@theme/Tabs';
@@ -26,9 +25,9 @@ Pulsar brokers consist of two components:
 
 [Brokers](reference-terminology.md#broker) can be managed via:
 
-* The [`brokers`](reference-pulsar-admin.md#brokers) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `brokers` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/brokers` endpoint of the admin {@inject: rest:REST:/} API
-* The `brokers` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin.html} object in the [Java API](client-libraries-java)
+* The `brokers` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 In addition to being configurable when you start them up, brokers can also be [dynamically configured](#dynamic-broker-configuration).
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-clusters.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-clusters.md
index 972c7e1..8687ae6 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-clusters.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-clusters.md
@@ -2,7 +2,6 @@
 id: admin-api-clusters
 title: Managing Clusters
 sidebar_label: "Clusters"
-original_id: admin-api-clusters
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -24,9 +23,9 @@ servers (aka [bookies](reference-terminology.md#bookie)), and a [ZooKeeper](http
 
 Clusters can be managed via:
 
-* The [`clusters`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `clusters` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/clusters` endpoint of the admin {@inject: rest:REST:/} API
-* The `clusters` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `clusters` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Clusters resources
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-namespaces.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-namespaces.md
index 216cb6f..c53fa3c 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-namespaces.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-namespaces.md
@@ -2,7 +2,6 @@
 id: admin-api-namespaces
 title: Managing Namespaces
 sidebar_label: "Namespaces"
-original_id: admin-api-namespaces
 ---
 
 import Tabs from '@theme/Tabs';
@@ -23,9 +22,9 @@ Pulsar [namespaces](reference-terminology.md#namespace) are logical groupings of
 
 Namespaces can be managed via:
 
-* The [`namespaces`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `namespaces` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/namespaces` endpoint of the admin {@inject: rest:REST:/} API
-* The `namespaces` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `namespaces` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Namespaces resources
 
@@ -49,8 +48,12 @@ $ pulsar-admin namespaces create test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|PUT|/admin/v2/namespaces/:tenant/:namespace|operation/createNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -105,8 +108,12 @@ $ pulsar-admin namespaces policies test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace|operation/getPolicies?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -142,8 +149,12 @@ test-tenant/ns2
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant|operation/getTenantNamespaces?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -177,8 +188,12 @@ $ pulsar-admin namespaces delete test-tenant/ns1
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace|operation/deleteNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -196,7 +211,7 @@ admin.namespaces().deleteNamespace(namespace);
 
 #### Set replication cluster
 
-It sets replication clusters for a namespace, so Pulsar can internally replicate publish message from one colo to another colo.
+You can set replication clusters for a namespace to enable Pulsar to internally replicate the published messages from one colocation facility to another.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -234,7 +249,7 @@ admin.namespaces().setNamespaceReplicationClusters(namespace, clusters);
 
 #### Get replication cluster
 
-It gives a list of replication clusters for a given namespace.
+You can get the list of replication clusters for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -281,13 +296,13 @@ admin.namespaces().getNamespaceReplicationClusters(namespace)
 
 Backlog quota helps the broker to restrict bandwidth/storage of a namespace once it reaches a certain threshold limit. Admin can set the limit and take corresponding action after the limit is reached.
 
-  1.  producer_request_hold: broker will hold and not persist produce request payload
+  1.  producer_request_hold: broker holds but not persists produce request payload
 
-  2.  producer_exception: broker disconnects with the client by giving an exception.
+  2.  producer_exception: broker disconnects with the client by giving an exception
 
-  3.  consumer_backlog_eviction: broker will start discarding backlog messages
+  3.  consumer_backlog_eviction: broker starts discarding backlog messages
 
-  Backlog quota restriction can be taken care by defining restriction of backlog-quota-type: destination_storage
+Backlog quota restriction can be taken care of by defining a restriction of backlog-quota-type: destination_storage.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -300,12 +315,6 @@ $ pulsar-admin namespaces set-backlog-quota --limit 10G --limitTime 36000 --poli
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -330,7 +339,7 @@ admin.namespaces().setBacklogQuota(namespace, new BacklogQuota(limit, limitTime,
 
 #### Get backlog quota policies
 
-It shows a configured backlog quota for a given namespace.
+You can get a configured backlog quota for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -378,7 +387,7 @@ admin.namespaces().getBacklogQuotaMap(namespace);
 
 #### Remove backlog quota policies
 
-It removes backlog quota policies for a given namespace
+You can remove backlog quota policies for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -391,12 +400,6 @@ $ pulsar-admin namespaces remove-backlog-quota test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -423,7 +426,7 @@ admin.namespaces().removeBacklogQuota(namespace, backlogQuotaType)
 
 #### Set persistence policies
 
-Persistence policies allow to configure persistency-level for all topic messages under a given namespace.
+Persistence policies allow users to configure persistency-level for all topic messages under a given namespace.
 
   -   Bookkeeper-ack-quorum: Number of acks (guaranteed copies) to wait for each entry, default: 0
 
@@ -444,12 +447,6 @@ $ pulsar-admin namespaces set-persistence --bookkeeper-ack-quorum 2 --bookkeeper
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -474,7 +471,7 @@ admin.namespaces().setPersistence(namespace,new PersistencePolicies(bookkeeperEn
 
 #### Get persistence policies
 
-It shows the configured persistence policies of a given namespace.
+You can get the configured persistence policies of a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -537,12 +534,6 @@ $ pulsar-admin namespaces unload --bundle 0x00000000_0xffffffff test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -567,8 +558,7 @@ admin.namespaces().unloadNamespaceBundle(namespace, bundle)
 
 #### Split namespace bundles
 
-Each namespace bundle can contain multiple topics and each bundle can be served by only one broker. 
-If a single bundle is creating an excessive load on a broker, an admin splits the bundle using this command permitting one or more of the new bundles to be unloaded thus spreading the load across the brokers.
+One namespace bundle can contain multiple topics but can be served by only one broker. If a single bundle is creating an excessive load on a broker, an admin can split the bundle using the command below, permitting one or more of the new bundles to be unloaded, thus balancing the load across the brokers.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -581,12 +571,6 @@ $ pulsar-admin namespaces split-bundle --bundle 0x00000000_0xffffffff test-tenan
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -613,7 +597,7 @@ admin.namespaces().splitNamespaceBundle(namespace, bundle)
 
 #### Set message-ttl
 
-It configures message’s time to live (in seconds) duration.
+You can configure the time to live (in seconds) duration for messages. In the example below, the message-ttl is set as 100s.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -626,12 +610,6 @@ $ pulsar-admin namespaces set-message-ttl --messageTTL 100 test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -656,7 +634,7 @@ admin.namespaces().setNamespaceMessageTTL(namespace, messageTTL)
 
 #### Get message-ttl
 
-It gives a message ttl of configured namespace.
+When the message-ttl for a namespace is set, you can use the command below to get the configured value. This example continues the example of the command `set message-ttl`, so the returned value is 100 (seconds).
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -684,6 +662,12 @@ $ pulsar-admin namespaces get-message-ttl test-tenant/ns1
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -693,6 +677,12 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 
 </Tabs>
@@ -712,12 +702,6 @@ $ pulsar-admin namespaces remove-message-ttl test-tenant/ns1
 
 ```
 
-```
-
-100
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -758,12 +742,6 @@ $ pulsar-admin namespaces clear-backlog --sub my-subscription test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -801,12 +779,6 @@ $ pulsar-admin namespaces clear-backlog  --bundle 0x00000000_0xffffffff  --sub m
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -842,13 +814,7 @@ Each namespace contains multiple topics and the retention size (storage size) of
 
 ```
 
-$ pulsar-admin set-retention --size 100 --time 10 test-tenant/ns1
-
-```
-
-```
-
-N/A
+$ pulsar-admin namespaces set-retention --size 100 --time 10 test-tenant/ns1
 
 ```
 
@@ -932,9 +898,7 @@ disables the throttling.
 :::note
 
 - If neither `clusterDispatchRate` nor `topicDispatchRate` is configured, dispatch throttling is disabled.
->
 - If `topicDispatchRate` is not configured, `clusterDispatchRate` takes effect.
-> 
 - If `topicDispatchRate` is configured, `topicDispatchRate` takes effect.
 
 :::
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-non-persistent-topics.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-non-persistent-topics.md
index 12220de..78dac35 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-non-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-non-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-non-persistent-topics
 title: Managing non-persistent topics
 sidebar_label: "Non-Persistent topics"
-original_id: admin-api-non-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-partitioned-topics.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-partitioned-topics.md
index 6734586..7221b3d 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-partitioned-topics.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-partitioned-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-partitioned-topics
 title: Managing partitioned topics
 sidebar_label: "Partitioned topics"
-original_id: admin-api-partitioned-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-permissions.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-permissions.md
index e2ca469..faedbf1 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-permissions.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-permissions.md
@@ -2,7 +2,6 @@
 id: admin-api-permissions
 title: Managing permissions
 sidebar_label: "Permissions"
-original_id: admin-api-permissions
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-persistent-topics.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-persistent-topics.md
index b6d293b..8a7abae 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-persistent-topics
 title: Managing persistent topics
 sidebar_label: "Persistent topics"
-original_id: admin-api-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-schemas.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-schemas.md
index 9ffe21f..8399a03 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-schemas.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-schemas.md
@@ -2,6 +2,5 @@
 id: admin-api-schemas
 title: Managing Schemas
 sidebar_label: "Schemas"
-original_id: admin-api-schemas
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/admin-api-tenants.md b/site2/website-next/versioned_docs/version-2.4.0/admin-api-tenants.md
index fe68336..570ac31 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/admin-api-tenants.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/admin-api-tenants.md
@@ -2,7 +2,6 @@
 id: admin-api-tenants
 title: Managing Tenants
 sidebar_label: "Tenants"
-original_id: admin-api-tenants
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -80,22 +79,26 @@ $ pulsar-admin tenants create my-tenant
 
 ```
 
-When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
+When creating a tenant, you can optionally assign admin roles using the `-r`/`--admin-roles`
+flag, and clusters using the `-c`/`--allowed-clusters` flag. You can specify multiple values
+as a comma-separated list. Here are some examples:
 
 ```shell
 
 $ pulsar-admin tenants create my-tenant \
-  --admin-roles role1,role2,role3
+  --admin-roles role1,role2,role3 \
+  --allowed-clusters cluster1
 
 $ pulsar-admin tenants create my-tenant \
   -r role1
+  -c cluster1
 
 ```
 
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
+{@inject: endpoint|PUT|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -140,7 +143,7 @@ $ pulsar-admin tenants get my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|GET|/admin/v2/tenants/:cluster|operation/getTenant?version=@pulsar:version_number@}
+{@inject: endpoint|GET|/admin/v2/tenants/:tenant|operation/getTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -175,7 +178,7 @@ $ pulsar-admin tenants delete my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/deleteTenant?version=@pulsar:version_number@}
+{@inject: endpoint|DELETE|/admin/v2/tenants/:tenant|operation/deleteTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -210,7 +213,7 @@ $ pulsar-admin tenants update my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/updateTenant?version=@pulsar:version_number@}
+{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/updateTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
diff --git a/site2/website-next/versioned_docs/version-2.4.0/administration-dashboard.md b/site2/website-next/versioned_docs/version-2.4.0/administration-dashboard.md
index 514b076..1eb0404 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/administration-dashboard.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/administration-dashboard.md
@@ -7,7 +7,7 @@ original_id: administration-dashboard
 
 :::note
 
-Pulsar dashboard is deprecated. If you want to manage and monitor the stats of your topics, use [Pulsar Manager](administration-pulsar-manager). 
+Pulsar dashboard is deprecated. We recommend you use [Pulsar Manager](administration-pulsar-manager) to manage and monitor the stats of your topics. 
 
 :::
 
@@ -53,17 +53,17 @@ $ docker run -p 80:80 \
 ```
 
  
-You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the ip address or hostname of the machine running Pulsar standalone. The ip address or hostname should be accessible from the docker instance running dashboard.
+You need to specify only one service URL for a Pulsar cluster. Internally, the collector figures out all the existing clusters and the brokers from where it needs to pull the metrics. If you connect the dashboard to Pulsar running in standalone mode, the URL is `http://<broker-ip>:8080` by default. `<broker-ip>` is the IP address or hostname of the machine that runs Pulsar standalone. The IP address or hostname should be accessible from the running dashboard in the docker instance.
 
-Once the Docker container runs, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
+Once the Docker container starts, the web dashboard is accessible via `localhost` or whichever host that Docker uses.
 
-> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container
+> The `SERVICE_URL` that the dashboard uses needs to be reachable from inside the Docker container.
 
 If the Pulsar service runs in standalone mode in `localhost`, the `SERVICE_URL` has to
-be the IP of the machine.
+be the IP address of the machine.
 
 Similarly, given the Pulsar standalone advertises itself with localhost by default, you need to
-explicitly set the advertise address to the host IP. For example:
+explicitly set the advertise address to the host IP address. For example:
 
 ```shell
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/administration-load-balance.md b/site2/website-next/versioned_docs/version-2.4.0/administration-load-balance.md
index 3efba60..834b156 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/administration-load-balance.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/administration-load-balance.md
@@ -2,13 +2,11 @@
 id: administration-load-balance
 title: Pulsar load balance
 sidebar_label: "Load balance"
-original_id: administration-load-balance
 ---
 
 ## Load balance across Pulsar brokers
 
-Pulsar is an horizontally scalable messaging system, so the traffic
-in a logical cluster must be spread across all the available Pulsar brokers as evenly as possible, which is a core requirement.
+Pulsar is a horizontally scalable messaging system, so the traffic in a logical cluster must be balanced across all the available Pulsar brokers as evenly as possible, which is a core requirement.
 
 You can use multiple settings and tools to control the traffic distribution which require a bit of context to understand how the traffic is managed in Pulsar. Though, in most cases, the core requirement mentioned above is true out of the box and you should not worry about it. 
 
@@ -36,11 +34,9 @@ Instead of individual topic or partition assignment, each broker takes ownership
 
 The namespace is the "administrative" unit: many config knobs or operations are done at the namespace level.
 
-For assignment, a namespaces is sharded into a list of "bundles", with each bundle comprising
-a portion of overall hash range of the namespace.
+For assignment, a namespace is sharded into a list of "bundles", with each bundle comprising a portion of the overall hash range of the namespace.
 
-Topics are assigned to a particular bundle by taking the hash of the topic name and checking in which
-bundle the hash falls into.
+Topics are assigned to a particular bundle by taking the hash of the topic name and checking in which bundle the hash falls into.
 
 Each bundle is independent of the others and thus is independently assigned to different brokers.
 
@@ -72,8 +68,7 @@ On the same note, it is beneficial to start with more bundles than the number of
 
 ### Unload topics and bundles
 
-You can "unload" a topic in Pulsar with admin operation. Unloading means to close the topics,
-release ownership and reassign the topics to a new broker, based on current load.
+You can "unload" a topic in Pulsar with admin operation. Unloading means to close the topics, release ownership and reassign the topics to a new broker, based on current load.
 
 When unloading happens, the client experiences a small latency blip, typically in the order of tens of milliseconds, while the topic is reassigned.
 
@@ -97,9 +92,11 @@ pulsar-admin namespaces unload tenant/namespace
 
 ### Split namespace bundles 
 
-Since the load for the topics in a bundle might change over time, or predicting upfront might just be hard, brokers can split bundles into two. The new smaller bundles can be reassigned to different brokers.
+Since the load for the topics in a bundle might change over time and predicting the load might be hard, bundle split is designed to deal with these issues. The broker splits a bundle into two and the new smaller bundles can be reassigned to different brokers.
 
-The splitting happens based on some tunable thresholds. Any existing bundle that exceeds any of the threshold is a candidate to be split. By default the newly split bundles are also immediately offloaded to other brokers, to facilitate the traffic distribution.
+The splitting is based on some tunable thresholds. Any existing bundle that exceeds any of the threshold is a candidate to be split. By default the newly split bundles are also immediately offloaded to other brokers, to facilitate the traffic distribution. 
+
+You can split namespace bundles in two ways, by setting `supportedNamespaceBundleSplitAlgorithms` to `range_equally_divide` or `topic_count_equally_divide` in `broker.conf` file. The former splits the bundle into two parts with the same hash range size; the latter splits the bundle into two parts with the same number of topics. You can also configure other parameters for namespace bundles.
 
 ```properties
 
@@ -130,13 +127,11 @@ loadBalancerNamespaceMaximumBundles=128
 
 The support for automatic load shedding is available in the load manager of Pulsar. This means that whenever the system recognizes a particular broker is overloaded, the system forces some traffic to be reassigned to less loaded brokers.
 
-When a broker is identified as overloaded, the broker forces to "unload" a subset of the bundles, the
-ones with higher traffic, that make up for the overload percentage.
+When a broker is identified as overloaded, the broker forces to "unload" a subset of the bundles, the ones with higher traffic, that make up for the overload percentage.
 
 For example, the default threshold is 85% and if a broker is over quota at 95% CPU usage, then the broker unloads the percent difference plus a 5% margin: `(95% - 85%) + 5% = 15%`.
 
-Given the selection of bundles to offload is based on traffic (as a proxy measure for cpu, network
-and memory), broker unloads bundles for at least 15% of traffic.
+Given the selection of bundles to offload is based on traffic (as a proxy measure for cpu, network and memory), broker unloads bundles for at least 15% of traffic.
 
 The automatic load shedding is enabled by default and you can disable the automatic load shedding with this setting:
 
@@ -160,6 +155,20 @@ loadBalancerSheddingGracePeriodMinutes=30
 
 ```
 
+Pulsar supports three types of shedding strategies:
+
+##### ThresholdShedder
+This strategy tends to shed the bundles if any broker's usage is above the configured threshold. It does this by first computing the average resource usage per broker for the whole cluster. The resource usage for each broker is calculated using the following method: LocalBrokerData#getMaxResourceUsageWithWeight). The weights for each resource are configurable. Historical observations are included in the running average based on the broker's setting for loadBalancerHistoryResourcePercenta [...]
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.ThresholdShedder`
+
+##### OverloadShedder
+This strategy will attempt to shed exactly one bundle on brokers which are overloaded, that is, whose maximum system resource usage exceeds loadBalancerBrokerOverloadedThresholdPercentage. To see which resources are considered when determining the maximum system resource. A bundle is recommended for unloading off that broker if and only if the following conditions hold: The broker has at least two bundles assigned and the broker has at least one bundle that has not been unloaded recently [...]
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.OverloadShedder`
+
+##### UniformLoadShedder
+This strategy tends to distribute load uniformly across all brokers. This strategy checks the load difference between the broker with the highest load and the broker with the lowest load. If the difference is higher than the configured thresholds `loadBalancerMsgRateDifferenceShedderThreshold` and `loadBalancerMsgThroughputMultiplierDifferenceShedderThreshold`, it finds the bundles that can be unloaded to distribute traffic evenly across all brokers. Configure the broker with the value below to use this strategy.
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.UniformLoadShedder`
+
 #### Broker overload thresholds
 
 The determinations of when a broker is overloaded is based on threshold of CPU, network and memory usage. Whenever either of those metrics reaches the threshold, the system triggers the shedding (if enabled).
@@ -175,9 +184,7 @@ loadBalancerBrokerOverloadedThresholdPercentage=85
 
 Pulsar gathers the usage stats from the system metrics.
 
-In case of network utilization, in some cases the network interface speed that Linux reports is
-not correct and needs to be manually overridden. This is the case in AWS EC2 instances with 1Gbps
-NIC speed for which the OS reports 10Gbps speed.
+In case of network utilization, in some cases the network interface speed that Linux reports is not correct and needs to be manually overridden. This is the case in AWS EC2 instances with 1Gbps NIC speed for which the OS reports 10Gbps speed.
 
 Because of the incorrect max speed, the Pulsar load manager might think the broker has not reached the NIC capacity, while in fact the broker already uses all the bandwidth and the traffic is slowed down.
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/administration-proxy.md b/site2/website-next/versioned_docs/version-2.4.0/administration-proxy.md
index c046ed3..3cef937 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/administration-proxy.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/administration-proxy.md
@@ -2,10 +2,9 @@
 id: administration-proxy
 title: Pulsar proxy
 sidebar_label: "Pulsar proxy"
-original_id: administration-proxy
 ---
 
-Pulsar proxy is an optional gateway. Pulsar proxy is used when direction connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
+Pulsar proxy is an optional gateway. Pulsar proxy is used when direct connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
 
 ## Configure the proxy
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/administration-stats.md b/site2/website-next/versioned_docs/version-2.4.0/administration-stats.md
index ac0c036..2ccd73c 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/administration-stats.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/administration-stats.md
@@ -2,7 +2,6 @@
 id: administration-stats
 title: Pulsar stats
 sidebar_label: "Pulsar statistics"
-original_id: administration-stats
 ---
 
 ## Partitioned topics
diff --git a/site2/website-next/versioned_docs/version-2.4.0/client-libraries-cpp.md b/site2/website-next/versioned_docs/version-2.4.0/client-libraries-cpp.md
index 333ec67..958861a 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/client-libraries-cpp.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/client-libraries-cpp.md
@@ -2,7 +2,6 @@
 id: client-libraries-cpp
 title: Pulsar C++ client
 sidebar_label: "C++"
-original_id: client-libraries-cpp
 ---
 
 You can use Pulsar C++ client to create Pulsar producers and consumers in C++.
@@ -11,7 +10,7 @@ All the methods in producer, consumer, and reader of a C++ client are thread-saf
 
 ## Supported platforms
 
-Pulsar C++ client is supported on **Linux** and **MacOS** platforms.
+Pulsar C++ client is supported on **Linux**, **MacOS** and **Windows** platforms.
 
 [Doxygen](http://www.doxygen.nl/)-generated API docs for the C++ client are available [here](/api/cpp).
 
@@ -21,8 +20,8 @@ You need to install the following components before using the C++ client:
 
 * [CMake](https://cmake.org/)
 * [Boost](http://www.boost.org/)
-* [Protocol Buffers](https://developers.google.com/protocol-buffers/) 2.6
-* [libcurl](https://curl.haxx.se/libcurl/)
+* [Protocol Buffers](https://developers.google.com/protocol-buffers/) >= 3
+* [libcurl](https://curl.se/libcurl/)
 * [Google Test](https://github.com/google/googletest)
 
 ## Linux
@@ -147,6 +146,12 @@ $ rpm -ivh apache-pulsar-client*.rpm
 
 After you install RPM successfully, Pulsar libraries are in the `/usr/lib` directory.
 
+:::note
+
+If you get the error that `libpulsar.so: cannot open shared object file: No such file or directory` when starting Pulsar client, you may need to run `ldconfig` first.
+
+:::
+
 ### Install Debian
 
 1. Download a Debian package from the links in the table. 
@@ -236,10 +241,8 @@ $ export OPENSSL_INCLUDE_DIR=/usr/local/opt/openssl/include/
 $ export OPENSSL_ROOT_DIR=/usr/local/opt/openssl/
 
 # Protocol Buffers installation
-$ brew tap homebrew/versions
-$ brew install protobuf260
-$ brew install boost
-$ brew install log4cxx
+$ brew install protobuf boost boost-python log4cxx
+# If you are using python3, you need to install boost-python3 
 
 # Google Test installation
 $ git clone https://github.com/google/googletest.git
@@ -269,6 +272,50 @@ brew install libpulsar
 
 ```
 
+## Windows (64-bit)
+
+### Compilation
+
+1. Clone the Pulsar repository.
+
+```shell
+
+$ git clone https://github.com/apache/pulsar
+
+```
+
+2. Install all necessary dependencies.
+
+```shell
+
+cd ${PULSAR_HOME}/pulsar-client-cpp
+vcpkg install --feature-flags=manifests --triplet x64-windows
+
+```
+
+3. Build C++ libraries.
+
+```shell
+
+cmake -B ./build -A x64 -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=OFF -DVCPKG_TRIPLET=x64-windows -DCMAKE_BUILD_TYPE=Release -S .
+cmake --build ./build --config Release
+
+```
+
+> **NOTE**
+>
+> 1. For Windows 32-bit, you need to use `-A Win32` and `-DVCPKG_TRIPLET=x86-windows`.
+> 2. For MSVC Debug mode, you need to replace `Release` with `Debug` for both `CMAKE_BUILD_TYPE` variable and `--config` option.
+
+4. Client libraries are available in the following places.
+
+```
+
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.lib
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.dll
+
+```
+
 ## Connection URLs
 
 To connect Pulsar using client libraries, you need to specify a Pulsar protocol URL.
@@ -299,109 +346,361 @@ pulsar+ssl://pulsar.us-west.example.com:6651
 
 ## Create a consumer
 
-To use Pulsar as a consumer, you need to create a consumer on the C++ client. The following is an example. 
+To use Pulsar as a consumer, you need to create a consumer on the C++ client. There are two main ways of using the consumer:
+- [Blocking style](#blocking-example): synchronously calling `receive(msg)`.
+- [Non-blocking](#consumer-with-a-message-listener) (event based) style: using a message listener.
+
+### Blocking example
+
+The benefit of this approach is that it is the simplest code. It simply keeps calling `receive(msg)`, which blocks until a message is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
 
-Consumer consumer;
-Result result = client.subscribe("my-topic", "my-subscription-name", consumer);
-if (result != ResultOk) {
-    LOG_ERROR("Failed to subscribe: " << result);
-    return -1;
+    Message msg;
+    int ctr = 0;
+    // consume 100 messages
+    while (ctr < 100) {
+        consumer.receive(msg);
+        std::cout << "Received: " << msg
+            << "  with payload '" << msg.getDataAsString() << "'" << std::endl;
+
+        consumer.acknowledge(msg);
+        ctr++;
+    }
+
+    std::cout << "Finished consuming synchronously!" << std::endl;
+
+    client.close();
+    return 0;
 }
 
-Message msg;
+```
+
+### Consumer with a message listener
+
+You can avoid running a loop with blocking calls by using an event-based style with a message listener, which is invoked for each message that is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
-while (true) {
-    consumer.receive(msg);
-    LOG_INFO("Received: " << msg
-            << "  with payload '" << msg.getDataAsString() << "'");
+```c++
+
+#include <pulsar/Client.h>
+#include <atomic>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> messagesReceived;
+
+void handleAckComplete(Result res) {
+    std::cout << "Ack res: " << res << std::endl;
+}
 
-    consumer.acknowledge(msg);
+void listener(Consumer consumer, const Message& msg) {
+    std::cout << "Got message " << msg << " with content '" << msg.getDataAsString() << "'" << std::endl;
+    messagesReceived++;
+    consumer.acknowledgeAsync(msg.getMessageId(), handleAckComplete);
 }
 
-client.close();
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setMessageListener(listener);
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
+
+    // wait for 100 messages to be consumed
+    while (messagesReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished consuming asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
 ## Create a producer
 
-To use Pulsar as a producer, you need to create a producer on the C++ client. The following is an example. 
+To use Pulsar as a producer, you need to create a producer on the C++ client. There are two main ways of using a producer:
+- [Blocking style](#simple-blocking-example) : each call to `send` waits for an ack from the broker.
+- [Non-blocking asynchronous style](#non-blocking-example) : `sendAsync` is called instead of `send` and a callback is supplied for when the ack is received from the broker.
+
+### Simple blocking example
+
+This example sends 100 messages using the blocking style. While simple, it does not produce high throughput as it waits for each ack to come back before sending the next message.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+#include <thread>
 
-Producer producer;
-Result result = client.createProducer("my-topic", producer);
-if (result != ResultOk) {
-    LOG_ERROR("Error creating producer: " << result);
-    return -1;
-}
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Producer producer;
+    Result result = client.createProducer("persistent://public/default/my-topic", producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages synchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        Result result = producer.send(msg);
+        if (result != ResultOk) {
+            std::cout << "The message " << content << " could not be sent, received code: " << result << std::endl;
+        } else {
+            std::cout << "The message " << content << " sent successfully" << std::endl;
+        }
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    std::cout << "Finished producing synchronously!" << std::endl;
 
-// Publish 10 messages to the topic
-for (int i = 0; i < 10; i++){
-    Message msg = MessageBuilder().setContent("my-message").build();
-    Result res = producer.send(msg);
-    LOG_INFO("Message sent: " << res);
+    client.close();
+    return 0;
 }
-client.close();
 
 ```
 
-## Enable authentication in connection URLs
-If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
+### Non-blocking example
 
-```cpp
+This example sends 100 messages using the non-blocking style, calling `sendAsync` instead of `send`. This allows the producer to have multiple messages in flight at a time, which increases throughput.
 
-ClientConfiguration config = ClientConfiguration();
-config.setUseTls(true);
-config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
-config.setTlsAllowInsecureConnection(false);
-config.setAuth(pulsar::AuthTls::create(
-            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+The producer configuration `blockIfQueueFull` is useful here to avoid `ResultProducerQueueIsFull` errors when the internal queue for outgoing send requests becomes full. Once the internal queue is full, `sendAsync` becomes blocking which can make your code simpler.
 
-Client client("pulsar+ssl://my-broker.com:6651", config);
+Without this configuration, the result code `ResultProducerQueueIsFull` is passed to the callback. You must decide how to deal with that (retry, discard, etc.).
+
+```c++
+
+#include <pulsar/Client.h>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> acksReceived;
+
+void callback(Result code, const MessageId& msgId, std::string msgContent) {
+    // message processing logic here
+    std::cout << "Received ack for msg: " << msgContent << " with code: "
+        << code << " -- MsgID: " << msgId << std::endl;
+    acksReceived++;
+}
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    ProducerConfiguration producerConf;
+    producerConf.setBlockIfQueueFull(true);
+    Producer producer;
+    Result result = client.createProducer("persistent://public/default/my-topic",
+                                          producerConf, producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages asynchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        producer.sendAsync(msg, std::bind(callback,
+                                          std::placeholders::_1, std::placeholders::_2, content));
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    // wait for 100 messages to be acked
+    while (acksReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished producing asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
-For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+### Partitioned topics and lazy producers
 
-## Schema
+When scaling out a Pulsar topic, you may configure a topic to have hundreds of partitions. Likewise, you may have also scaled out your producers so there are hundreds or even thousands of producers. This can put some strain on the Pulsar brokers as when you create a producer on a partitioned topic, internally it creates one internal producer per partition which involves communications to the brokers for each one. So for a topic with 1000 partitions and 1000 producers, it ends up creating [...]
 
-This section describes some examples about schema. For more information about schema, see [Pulsar schema](schema-get-started).
+You can reduce the load caused by this combination of a large number of partitions and many producers by doing the following:
+- use SinglePartition partition routing mode (this ensures that all messages are only sent to a single, randomly selected partition)
+- use non-keyed messages (when messages are keyed, routing is based on the hash of the key and so messages will end up being sent to multiple partitions)
+- use lazy producers (this ensures that an internal producer is only created on demand when a message needs to be routed to a partition)
 
-### Create producer with Avro schema
+With our example above, that reduces the number of internal producers spread out over the 1000 producer apps from 1,000,000 to just 1000.
 
-The following example shows how to create a producer with an Avro schema.
+Note that there can be extra latency for the first message sent. If you set a low send timeout, this timeout could be reached if the initial connection handshake is slow to complete.
 
-```cpp
+```c++
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-Producer producer;
 ProducerConfiguration producerConf;
-producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.createProducer("topic-avro", producerConf, producer);
+producerConf.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition);
+producerConf.setLazyStartPartitionedProducers(true);
 
 ```
 
-### Create consumer with Avro schema
-
-The following example shows how to create a consumer with an Avro schema.
+## Enable authentication in connection URLs
+If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
 
 ```cpp
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-ConsumerConfiguration consumerConf;
-Consumer consumer;
-consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+ClientConfiguration config = ClientConfiguration();
+config.setUseTls(true);
+config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
+config.setTlsAllowInsecureConnection(false);
+config.setAuth(pulsar::AuthTls::create(
+            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+
+Client client("pulsar+ssl://my-broker.com:6651", config);
 
 ```
 
+For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+
+## Schema
+
+This section describes some examples about schema. For more information about
+schema, see [Pulsar schema](schema-get-started).
+
+### Avro schema
+
+- The following example shows how to create a producer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  Producer producer;
+  ProducerConfiguration producerConf;
+  producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.createProducer("topic-avro", producerConf, producer);
+  
+  ```
+
+- The following example shows how to create a consumer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  ConsumerConfiguration consumerConf;
+  Consumer consumer;
+  consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+  
+  ```
+
+### ProtobufNative schema
+
+The following example shows how to create a producer and a consumer with a ProtobufNative schema.
+​
+1. Generate the `User` class using Protobuf3. 
+
+   :::note
+
+   You need to use Protobuf3 or later versions.
+
+   :::
+
+​
+
+   ```protobuf
+   
+   syntax = "proto3";
+   
+   message User {
+       string name = 1;
+       int32 age = 2;
+   }
+   
+   ```
+
+​
+2. Include the `ProtobufNativeSchema.h` in your source code. Ensure the Protobuf dependency has been added to your project.
+​
+
+   ```c++
+   
+   #include <pulsar/ProtobufNativeSchema.h>
+   
+   ```
+
+​
+3. Create a producer to send a `User` instance.
+​
+
+   ```c++
+   
+   ProducerConfiguration producerConf;
+   producerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   Producer producer;
+   client.createProducer("topic-protobuf", producerConf, producer);
+   User user;
+   user.set_name("my-name");
+   user.set_age(10);
+   std::string content;
+   user.SerializeToString(&content);
+   producer.send(MessageBuilder().setContent(content).build());
+   
+   ```
+
+​
+4. Create a consumer to receive a `User` instance.
+​
+
+   ```c++
+   
+   ConsumerConfiguration consumerConf;
+   consumerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   consumerConf.setSubscriptionInitialPosition(InitialPositionEarliest);
+   Consumer consumer;
+   client.subscribe("topic-protobuf", "my-sub", consumerConf, consumer);
+   Message msg;
+   consumer.receive(msg);
+   User user2;
+   user2.ParseFromArray(msg.getData(), msg.getLength());
+   
+   ```
+
diff --git a/site2/website-next/versioned_docs/version-2.4.0/client-libraries-websocket.md b/site2/website-next/versioned_docs/version-2.4.0/client-libraries-websocket.md
index bc13b43..c663f97 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/client-libraries-websocket.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/client-libraries-websocket.md
@@ -2,7 +2,6 @@
 id: client-libraries-websocket
 title: Pulsar WebSocket API
 sidebar_label: "WebSocket"
-original_id: client-libraries-websocket
 ---
 
 Pulsar [WebSocket](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API) API provides a simple way to interact with Pulsar using languages that do not have an official [client library](getting-started-clients). Through WebSocket, you can publish and consume messages and use features available on the [Client Features Matrix](https://github.com/apache/pulsar/wiki/Client-Features-Matrix) page.
@@ -190,7 +189,7 @@ Key | Type | Required? | Explanation
 `maxRedeliverCount` | int | no | Define a [maxRedeliverCount](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#deadLetterPolicy-org.apache.pulsar.client.api.DeadLetterPolicy-) for the consumer (default: 0). Activates [Dead Letter Topic](https://github.com/apache/pulsar/wiki/PIP-22%3A-Pulsar-Dead-Letter-Topic) feature.
 `deadLetterTopic` | string | no | Define a [deadLetterTopic](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#deadLetterPolicy-org.apache.pulsar.client.api.DeadLetterPolicy-) for the consumer (default: {topic}-{subscription}-DLQ). Activates [Dead Letter Topic](https://github.com/apache/pulsar/wiki/PIP-22%3A-Pulsar-Dead-Letter-Topic) feature.
 `pullMode` | boolean | no | Enable pull mode (default: false). See "Flow Control" below.
-`negativeAckRedeliveryDelay` | int | no | When a message is negatively acknowledged, it will be redelivered to the DLQ.
+`negativeAckRedeliveryDelay` | int | no | When a message is negatively acknowledged, the delay time before the message is redelivered (in milliseconds). The default value is 60000.
 `token` | string | no | Authentication token, this is used for the browser javascript client
 
 NB: these parameter (except `pullMode`) apply to the internal consumer of the WebSocket service.
@@ -204,23 +203,60 @@ Server will push messages on the WebSocket session:
 ```json
 
 {
-  "messageId": "CAAQAw==",
-  "payload": "SGVsbG8gV29ybGQ=",
-  "properties": {"key1": "value1", "key2": "value2"},
-  "publishTime": "2016-08-30 16:45:57.785",
-  "redeliveryCount": 4
+  "messageId": "CAMQADAA",
+  "payload": "hvXcJvHW7kOSrUn17P2q71RA5SdiXwZBqw==",
+  "properties": {},
+  "publishTime": "2021-10-29T16:01:38.967-07:00",
+  "redeliveryCount": 0,
+  "encryptionContext": {
+    "keys": {
+      "client-rsa.pem": {
+        "keyValue": "jEuwS+PeUzmCo7IfLNxqoj4h7txbLjCQjkwpaw5AWJfZ2xoIdMkOuWDkOsqgFmWwxiecakS6GOZHs94x3sxzKHQx9Oe1jpwBg2e7L4fd26pp+WmAiLm/ArZJo6JotTeFSvKO3u/yQtGTZojDDQxiqFOQ1ZbMdtMZA8DpSMuq+Zx7PqLo43UdW1+krjQfE5WD+y+qE3LJQfwyVDnXxoRtqWLpVsAROlN2LxaMbaftv5HckoejJoB4xpf/dPOUqhnRstwQHf6klKT5iNhjsY4usACt78uILT0pEPd14h8wEBidBz/vAlC/zVMEqiDVzgNS7dqEYS4iHbf7cnWVCn3Hxw==",
+        "metadata": {}
+      }
+    },
+    "param": "Tfu1PxVm6S9D3+Hk",
+    "compressionType": "NONE",
+    "uncompressedMessageSize": 0,
+    "batchSize": {
+      "empty": false,
+      "present": true
+    }
+  }
 }
 
 ```
 
-Key | Type | Required? | Explanation
-:---|:-----|:----------|:-----------
-`messageId` | string | yes | Message ID
-`payload` | string | yes | Base-64 encoded payload
-`publishTime` | string | yes | Publish timestamp
-`redeliveryCount` | number | yes | Number of times this message was already delivered
-`properties` | key-value pairs | no | Application-defined properties
-`key` | string | no |  Original routing key set by producer
+Below are the parameters in the WebSocket consumer response.
+
+- General parameters
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `messageId` | string | yes | Message ID
+  `payload` | string | yes | Base-64 encoded payload
+  `publishTime` | string | yes | Publish timestamp
+  `redeliveryCount` | number | yes | Number of times this message was already delivered
+  `properties` | key-value pairs | no | Application-defined properties
+  `key` | string | no |  Original routing key set by producer
+  `encryptionContext` | EncryptionContext | no | Encryption context that consumers can use to decrypt received messages
+  `param` | string | no | Initialization vector for cipher (Base64 encoding)
+  `batchSize` | string | no | Number of entries in a message (if it is a batch message)
+  `uncompressedMessageSize` | string | no | Message size before compression
+  `compressionType` | string | no | Algorithm used to compress the message payload
+
+- `encryptionContext` related parameter
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `keys` |key-EncryptionKey pairs | yes | Key in `key-EncryptionKey` pairs is an encryption key name. Value in `key-EncryptionKey` pairs is an encryption key object.
+
+- `encryptionKey` related parameters
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `keyValue` | string | yes | Encryption key (Base64 encoding)
+  `metadata` | key-value pairs | no | Application-defined metadata
 
 #### Acknowledging the message
 
@@ -454,9 +490,15 @@ TOPIC = scheme + '://localhost:8080/ws/v2/producer/persistent/public/default/my-
 
 ws = websocket.create_connection(TOPIC)
 
+# encode message
+s = "Hello World"
+firstEncoded = s.encode("UTF-8")
+binaryEncoded = base64.b64encode(firstEncoded)
+payloadString = binaryEncoded.decode('UTF-8')
+
 # Send one message as JSON
 ws.send(json.dumps({
-    'payload' : base64.b64encode('Hello World'),
+    'payload' : payloadString,
     'properties': {
         'key1' : 'value1',
         'key2' : 'value2'
@@ -466,9 +508,9 @@ ws.send(json.dumps({
 
 response =  json.loads(ws.recv())
 if response['result'] == 'ok':
-    print 'Message published successfully'
+    print( 'Message published successfully')
 else:
-    print 'Failed to publish message:', response
+    print('Failed to publish message:', response)
 ws.close()
 
 ```
@@ -495,7 +537,7 @@ while True:
     msg = json.loads(ws.recv())
     if not msg: break
 
-    print "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload']))
+    print( "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload'])))
 
     # Acknowledge successful processing
     ws.send(json.dumps({'messageId' : msg['messageId']}))
@@ -525,7 +567,7 @@ while True:
     msg = json.loads(ws.recv())
     if not msg: break
 
-    print "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload']))
+    print ( "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload'])))
 
     # Acknowledge successful processing
     ws.send(json.dumps({'messageId' : msg['messageId']}))
diff --git a/site2/website-next/versioned_docs/version-2.4.0/client-libraries.md b/site2/website-next/versioned_docs/version-2.4.0/client-libraries.md
index 23e5a06..ab5b7c4 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/client-libraries.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/client-libraries.md
@@ -2,7 +2,6 @@
 id: client-libraries
 title: Pulsar client libraries
 sidebar_label: "Overview"
-original_id: client-libraries
 ---
 
 Pulsar supports the following client libraries:
@@ -16,7 +15,7 @@ Pulsar supports the following client libraries:
 - [C# client](client-libraries-dotnet)
 
 ## Feature matrix
-Pulsar client feature matrix for different languages is listed on [Client Features Matrix](https://github.com/apache/pulsar/wiki/Client-Features-Matrix) page.
+Pulsar client feature matrix for different languages is listed on [Pulsar Feature Matrix (Client and Function)](https://github.com/apache/pulsar/wiki/PIP-108%3A-Pulsar-Feature-Matrix-%28Client-and-Function%29) page.
 
 ## Third-party clients
 
@@ -33,3 +32,4 @@ Besides the official released clients, multiple projects on developing Pulsar cl
 | Scala | [pulsar4s](https://github.com/sksamuel/pulsar4s) | [sksamuel](https://github.com/sksamuel) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Idomatic, typesafe, and reactive Scala client for Apache Pulsar |
 | Rust | [pulsar-rs](https://github.com/wyyerd/pulsar-rs) | [Wyyerd Group](https://github.com/wyyerd) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Future-based Rust bindings for Apache Pulsar |
 | .NET | [pulsar-client-dotnet](https://github.com/fsharplang-ru/pulsar-client-dotnet) | [Lanayx](https://github.com/Lanayx) | [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native .NET client for C#/F#/VB |
+| Node.js | [pulsar-flex](https://github.com/ayeo-flex-org/pulsar-flex) | [Daniel Sinai](https://github.com/danielsinai), [Ron Farkash](https://github.com/ronfarkash), [Gal Rosenberg](https://github.com/galrose)| [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native Nodejs client |
diff --git a/site2/website-next/versioned_docs/version-2.4.0/concepts-architecture-overview.md b/site2/website-next/versioned_docs/version-2.4.0/concepts-architecture-overview.md
index 6a501d2..8fe0717 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/concepts-architecture-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/concepts-architecture-overview.md
@@ -2,7 +2,6 @@
 id: concepts-architecture-overview
 title: Architecture Overview
 sidebar_label: "Architecture"
-original_id: concepts-architecture-overview
 ---
 
 At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication) data amongst themselves.
@@ -146,7 +145,7 @@ Some important things to know about the Pulsar proxy:
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL.
 
 You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/concepts-authentication.md b/site2/website-next/versioned_docs/version-2.4.0/concepts-authentication.md
index b375ecb..335da8d 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/concepts-authentication.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/concepts-authentication.md
@@ -2,7 +2,6 @@
 id: concepts-authentication
 title: Authentication and Authorization
 sidebar_label: "Authentication and Authorization"
-original_id: concepts-authentication
 ---
 
 Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/concepts-clients.md b/site2/website-next/versioned_docs/version-2.4.0/concepts-clients.md
index b68f76a..65201b5 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/concepts-clients.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/concepts-clients.md
@@ -2,7 +2,6 @@
 id: concepts-clients
 title: Pulsar Clients
 sidebar_label: "Clients"
-original_id: concepts-clients
 ---
 
 Pulsar exposes a client API with language bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md), [C++](client-libraries-cpp.md) and [C#](client-libraries-dotnet). The client API optimizes and encapsulates Pulsar's client-broker communication protocol and exposes a simple and intuitive API for use by applications.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/concepts-multi-tenancy.md b/site2/website-next/versioned_docs/version-2.4.0/concepts-multi-tenancy.md
index be752cc..8a17e72 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/concepts-multi-tenancy.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/concepts-multi-tenancy.md
@@ -2,7 +2,6 @@
 id: concepts-multi-tenancy
 title: Multi Tenancy
 sidebar_label: "Multi Tenancy"
-original_id: concepts-multi-tenancy
 ---
 
 Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/concepts-overview.md b/site2/website-next/versioned_docs/version-2.4.0/concepts-overview.md
index b903fa4..c76032c 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/concepts-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/concepts-overview.md
@@ -2,7 +2,6 @@
 id: concepts-overview
 title: Pulsar Overview
 sidebar_label: "Overview"
-original_id: concepts-overview
 ---
 
 Pulsar is a multi-tenant, high-performance solution for server-to-server messaging. Pulsar was originally developed by Yahoo, it is under the stewardship of the [Apache Software Foundation](https://www.apache.org/).
diff --git a/site2/website-next/versioned_docs/version-2.4.0/concepts-replication.md b/site2/website-next/versioned_docs/version-2.4.0/concepts-replication.md
index 6e23962..11677cc 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/concepts-replication.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/concepts-replication.md
@@ -2,7 +2,6 @@
 id: concepts-replication
 title: Geo Replication
 sidebar_label: "Geo Replication"
-original_id: concepts-replication
 ---
 
 Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo) in Pulsar enables you to do that.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/concepts-topic-compaction.md b/site2/website-next/versioned_docs/version-2.4.0/concepts-topic-compaction.md
index c85e703..3356298 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/concepts-topic-compaction.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/concepts-topic-compaction.md
@@ -2,7 +2,6 @@
 id: concepts-topic-compaction
 title: Topic Compaction
 sidebar_label: "Topic Compaction"
-original_id: concepts-topic-compaction
 ---
 
 Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
diff --git a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-bookkeepermetadata.md b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-bookkeepermetadata.md
index b0fa98d..187cb65 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-bookkeepermetadata.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-bookkeepermetadata.md
@@ -1,7 +1,6 @@
 ---
 id: cookbooks-bookkeepermetadata
 title: BookKeeper Ledger Metadata
-original_id: cookbooks-bookkeepermetadata
 ---
 
 Pulsar stores data on BookKeeper ledgers, you can understand the contents of a ledger by inspecting the metadata attached to the ledger.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-deduplication.md b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-deduplication.md
index 1669afa..307fe03 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-deduplication.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-deduplication.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-deduplication
 title: Message deduplication
-sidebar_label: "Message deduplication"
-original_id: cookbooks-deduplication
+sidebar_label: "Message deduplication "
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-encryption.md b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-encryption.md
index f0d8fb8..fbd1c97 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-encryption.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-encryption
 title: Pulsar Encryption
-sidebar_label: "Encryption"
-original_id: cookbooks-encryption
+sidebar_label: "Encryption "
 ---
 
 Pulsar encryption allows applications to encrypt messages at the producer and decrypt at the consumer. Encryption is performed using the public/private key pair configured by the application. Encrypted messages can only be decrypted by consumers with a valid key.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-message-queue.md b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-message-queue.md
index eb43cbd..9b93a94 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-message-queue.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-message-queue.md
@@ -2,7 +2,6 @@
 id: cookbooks-message-queue
 title: Using Pulsar as a message queue
 sidebar_label: "Message queue"
-original_id: cookbooks-message-queue
 ---
 
 Message queues are essential components of many large-scale data architectures. If every single work object that passes through your system absolutely *must* be processed in spite of the slowness or downright failure of this or that system component, there's a good chance that you'll need a message queue to step in and ensure that unprocessed data is retained---with correct ordering---until the required actions are taken.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-non-persistent.md b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-non-persistent.md
index 391569a..d40c4fb 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-non-persistent.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-non-persistent.md
@@ -2,7 +2,6 @@
 id: cookbooks-non-persistent
 title: Non-persistent messaging
 sidebar_label: "Non-persistent messaging"
-original_id: cookbooks-non-persistent
 ---
 
 **Non-persistent topics** are Pulsar topics in which message data is *never* [persistently stored](concepts-architecture-overview.md#persistent-storage) and kept only in memory. This cookbook provides:
diff --git a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-partitioned.md b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-partitioned.md
index 7882fb9..2589693 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-partitioned.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-partitioned.md
@@ -2,6 +2,5 @@
 id: cookbooks-partitioned
 title: Partitioned topics
 sidebar_label: "Partitioned Topics"
-original_id: cookbooks-partitioned
 ---
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-retention-expiry.md b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-retention-expiry.md
index b9353b5..738cf42 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/cookbooks-retention-expiry.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/cookbooks-retention-expiry.md
@@ -2,7 +2,6 @@
 id: cookbooks-retention-expiry
 title: Message retention and expiry
 sidebar_label: "Message retention and expiry"
-original_id: cookbooks-retention-expiry
 ---
 
 import Tabs from '@theme/Tabs';
@@ -36,7 +35,7 @@ By default, when a Pulsar message arrives at a broker, the message is stored unt
 
 Retention policies are useful when you use the Reader interface. The Reader interface does not use acknowledgements, and messages do not exist within backlogs. It is required to configure retention for Reader-only use cases.
 
-When you set a retention policy on topics in a namespace, you must set **both** a *size limit* and a *time limit*. You can refer to the following table to set retention policies in `pulsar-admin` and Java.
+When you set a retention policy on topics in a namespace, you must set **both** a *size limit* (via `defaultRetentionSizeInMB`) and a *time limit* (via `defaultRetentionTimeInMinutes`). You can refer to the following table to set retention policies in `pulsar-admin` and Java.
 
 |Time limit|Size limit| Message retention      |
 |----------|----------|------------------------|
@@ -152,7 +151,10 @@ admin.namespaces().setRetention(namespace, policies);
 
 You can fetch the retention policy for a namespace by specifying the namespace. The output will be a JSON object with two keys: `retentionTimeInMinutes` and `retentionSizeInMB`.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-retention`](reference-pulsar-admin.md#namespaces) subcommand and specify the namespace.
 
@@ -168,11 +170,13 @@ $ pulsar-admin namespaces get-retention my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/retention|operation/getRetention?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -180,15 +184,17 @@ admin.namespaces().getRetention(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Backlog quotas
 
 *Backlogs* are sets of unacknowledged messages for a topic that have been stored by bookies. Pulsar stores all unacknowledged messages in backlogs until they are processed and acknowledged.
 
-You can control the allowable size of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
+You can control the allowable size and/or time of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
 
-TODO: Expand on is this per backlog or per topic?
-
-* an allowable *size threshold* for each topic in the namespace
+* an allowable *size and/or time threshold* for each topic in the namespace
 * a *retention policy* that determines which action the [broker](reference-terminology.md#broker) takes if the threshold is exceeded.
 
 The following retention policies are available:
@@ -210,9 +216,12 @@ Backlog quotas are handled at the namespace level. They can be managed via:
 
 You can set a size and/or time threshold and backlog retention policy for all of the topics in a [namespace](reference-terminology.md#namespace) by specifying the namespace, a size limit and/or a time limit in second, and a policy by name.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, and a retention policy using the `-p`/`--policy` flag.
+Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag or a time limit using the `-lt`/`--limitTime` flag, a retention policy using the `-p`/`--policy` flag, and a policy type using the `-t`/`--type` flag (default is destination_storage).
 
 ##### Example
 
@@ -220,16 +229,26 @@ Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand a
 
 $ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns \
   --limit 2G \
-  --limitTime 36000 \
   --policy producer_request_hold
 
 ```
 
-#### REST API
+```shell
+
+$ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns/my-topic \
+--limitTime 3600 \
+--policy producer_request_hold \
+--type message_age
+
+```
+
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -240,11 +259,18 @@ admin.namespaces().setBacklogQuota(namespace, quota);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get backlog threshold and backlog retention policy
 
 You can see which size threshold and backlog retention policy has been applied to a namespace.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-backlog-quotas`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-backlog-quotas) subcommand and specify a namespace. Here's an example:
 
@@ -260,11 +286,13 @@ $ pulsar-admin namespaces get-backlog-quotas my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/backlogQuotaMap|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -273,11 +301,18 @@ Map<BacklogQuota.BacklogQuotaType,BacklogQuota> quotas =
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove backlog quotas
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace. Here's an example:
+Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace, and use `-t`/`--type` to specify the backlog type to remove (default is destination_storage). Here's an example:
 
 ```shell
 
@@ -285,11 +320,13 @@ $ pulsar-admin namespaces remove-backlog-quota my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/removeBacklogQuota?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -297,6 +334,10 @@ admin.namespaces().removeBacklogQuota(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Clear backlog
 
 #### pulsar-admin
@@ -319,7 +360,10 @@ By default, Pulsar stores all unacknowledged messages forever. This can lead to
 
 ### Set the TTL for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`set-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-set-message-ttl) subcommand and specify a namespace and a TTL (in seconds) using the `-ttl`/`--messageTTL` flag.
 
@@ -332,11 +376,13 @@ $ pulsar-admin namespaces set-message-ttl my-tenant/my-ns \
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/setNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -344,9 +390,16 @@ admin.namespaces().setNamespaceMessageTTL(namespace, ttlInSeconds);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-message-ttl) subcommand and specify a namespace.
 
@@ -359,11 +412,13 @@ $ pulsar-admin namespaces get-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/getNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -371,9 +426,16 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`remove-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-message-ttl) subcommand and specify a namespace.
 
@@ -385,11 +447,13 @@ $ pulsar-admin namespaces remove-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/removeNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -397,6 +461,10 @@ admin.namespaces().removeNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Delete messages from namespaces
 
 If you do not have any retention period and that you never have much of a backlog, the upper limit for retaining messages, which are acknowledged, equals to the Pulsar segment rollover period + entry log rollover period + (garbage collection interval * garbage collection ratios).
diff --git a/site2/website-next/versioned_docs/version-2.4.0/deploy-aws.md b/site2/website-next/versioned_docs/version-2.4.0/deploy-aws.md
index 6323051..2034749 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/deploy-aws.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/deploy-aws.md
@@ -2,7 +2,6 @@
 id: deploy-aws
 title: Deploying a Pulsar cluster on AWS using Terraform and Ansible
 sidebar_label: "Amazon Web Services"
-original_id: deploy-aws
 ---
 
 > For instructions on deploying a single Pulsar cluster manually rather than using Terraform and Ansible, see [Deploying a Pulsar cluster on bare metal](deploy-bare-metal.md). For instructions on manually deploying a multi-cluster Pulsar instance, see [Deploying a Pulsar instance on bare metal](deploy-bare-metal-multi-cluster).
@@ -148,7 +147,7 @@ Variable name | Description | Default
 When you run the Ansible playbook, the following AWS resources are used:
 
 * 9 total [Elastic Compute Cloud](https://aws.amazon.com/ec2) (EC2) instances running the [ami-9fa343e7](https://access.redhat.com/articles/3135091) Amazon Machine Image (AMI), which runs [Red Hat Enterprise Linux (RHEL) 7.4](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/7.4_release_notes/index). By default, that includes:
-  * 3 small VMs for ZooKeeper ([t2.small](https://www.ec2instances.info/?selected=t2.small) instances)
+  * 3 small VMs for ZooKeeper ([t3.small](https://www.ec2instances.info/?selected=t3.small) instances)
   * 3 larger VMs for BookKeeper [bookies](reference-terminology.md#bookie) ([i3.xlarge](https://www.ec2instances.info/?selected=i3.xlarge) instances)
   * 2 larger VMs for Pulsar [brokers](reference-terminology.md#broker) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
   * 1 larger VMs for Pulsar [proxy](reference-terminology.md#proxy) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
diff --git a/site2/website-next/versioned_docs/version-2.4.0/deploy-bare-metal-multi-cluster.md b/site2/website-next/versioned_docs/version-2.4.0/deploy-bare-metal-multi-cluster.md
index 643c122..9dd2526 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/deploy-bare-metal-multi-cluster.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/deploy-bare-metal-multi-cluster.md
@@ -2,38 +2,30 @@
 id: deploy-bare-metal-multi-cluster
 title: Deploying a multi-cluster on bare metal
 sidebar_label: "Bare metal multi-cluster"
-original_id: deploy-bare-metal-multi-cluster
 ---
 
 :::tip
 
-1. Single-cluster Pulsar installations should be sufficient for all but the most ambitious use cases. If you are interested in experimenting with
-Pulsar or using it in a startup or on a single team, you had better opt for a single cluster. For instructions on deploying a single cluster,
-see the guide [here](deploy-bare-metal).
-2. If you want to use all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you need to download `apache-pulsar-io-connectors`
-package and install `apache-pulsar-io-connectors` under `connectors` directory in the pulsar directory on every broker node or on every function-worker node if you
-run a separate cluster of function workers for [Pulsar Functions](functions-overview).
-3. If you want to use [Tiered Storage](concepts-tiered-storage) feature in your Pulsar deployment, you need to download `apache-pulsar-offloaders`
-package and install `apache-pulsar-offloaders` under `offloaders` directory in the pulsar directory on every broker node. For more details of how to configure
-this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
+1. You can use single-cluster Pulsar installation in most use cases, such as experimenting with Pulsar or using Pulsar in a startup or in a single team. If you need to run a multi-cluster Pulsar instance, see the [guide](deploy-bare-metal-multi-cluster).
+2. If you want to use all built-in [Pulsar IO](io-overview.md) connectors, you need to download `apache-pulsar-io-connectors`package and install `apache-pulsar-io-connectors` under `connectors` directory in the pulsar directory on every broker node or on every function-worker node if you have run a separate cluster of function workers for [Pulsar Functions](functions-overview).
+3. If you want to use [Tiered Storage](concepts-tiered-storage.md) feature in your Pulsar deployment, you need to download `apache-pulsar-offloaders`package and install `apache-pulsar-offloaders` under `offloaders` directory in the Pulsar directory on every broker node. For more details of how to configure this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
 
 :::
 
-A Pulsar *instance* consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance involves the following basic steps:
+A Pulsar instance consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance consists of the following steps:
 
-* Deploying two separate [ZooKeeper](#deploy-zookeeper) quorums: a [local](#deploy-local-zookeeper) quorum for each cluster in the instance and a [configuration store](#configuration-store) quorum for instance-wide tasks
-* Initializing [cluster metadata](#cluster-metadata-initialization) for each cluster
-* Deploying a [BookKeeper cluster](#deploy-bookkeeper) of bookies in each Pulsar cluster
-* Deploying [brokers](#deploy-brokers) in each Pulsar cluster
+1. Deploying two separate ZooKeeper quorums: a local quorum for each cluster in the instance and a configuration store quorum for instance-wide tasks
+2. Initializing cluster metadata for each cluster
+3. Deploying a BookKeeper cluster of bookies in each Pulsar cluster
+4. Deploying brokers in each Pulsar cluster
 
-If you want to deploy a single Pulsar cluster, see [Clusters and Brokers](getting-started-standalone.md#start-the-cluster).
 
 > #### Run Pulsar locally or on Kubernetes?
-> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes on [Google Kubernetes Engine](deploy-kubernetes#pulsar [...]
+> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes, on Google Kubernetes Engine and on Amazon Web Services.
 
 ## System requirement
 
-Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. To use Pulsar, you need to install 64-bit JRE/JDK 8 or later versions.
+Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. You need to install 64-bit JRE/JDK 8 or later versions.
 
 :::note
 
@@ -68,8 +60,6 @@ $ cd apache-pulsar-@pulsar:version@
 
 ```
 
-## What your package contains
-
 The Pulsar binary package initially contains the following directories:
 
 Directory | Contains
@@ -93,17 +83,17 @@ Directory | Contains
 
 Each Pulsar instance relies on two separate ZooKeeper quorums.
 
-* [Local ZooKeeper](#deploy-local-zookeeper) operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs to have a dedicated ZooKeeper cluster.
-* [Configuration Store](#deploy-the-configuration-store) operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
+* Local ZooKeeper operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs a dedicated ZooKeeper cluster.
+* Configuration Store operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
 
-The configuration store quorum can be provided by an independent cluster of machines or by the same machines used by local ZooKeeper.
+You can use an independent cluster of machines or the same machines used by local ZooKeeper to provide the configuration store quorum.
 
 
 ### Deploy local ZooKeeper
 
 ZooKeeper manages a variety of essential coordination-related and configuration-related tasks for Pulsar.
 
-You need to stand up one local ZooKeeper cluster *per Pulsar cluster* for deploying a Pulsar instance. 
+You need to stand up one local ZooKeeper cluster per Pulsar cluster for deploying a Pulsar instance. 
 
 To begin, add all ZooKeeper servers to the quorum configuration specified in the [`conf/zookeeper.conf`](reference-configuration.md#zookeeper) file. Add a `server.N` line for each node in the cluster to the configuration, where `N` is the number of the ZooKeeper node. The following is an example for a three-node cluster:
 
@@ -117,7 +107,11 @@ server.3=zk3.us-west.example.com:2888:3888
 
 On each host, you need to specify the ID of the node in the `myid` file of each node, which is in `data/zookeeper` folder of each server by default (you can change the file location via the [`dataDir`](reference-configuration.md#zookeeper-dataDir) parameter).
 
-> See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+:::tip
+
+See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+
+:::
 
 On a ZooKeeper server at `zk1.us-west.example.com`, for example, you could set the `myid` value like this:
 
@@ -140,15 +134,15 @@ $ bin/pulsar-daemon start zookeeper
 
 ### Deploy the configuration store 
 
-The ZooKeeper cluster that is configured and started up in the section above is a *local* ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
+The ZooKeeper cluster configured and started up in the section above is a local ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
 
-If you deploy a [single-cluster](#single-cluster-pulsar-instance) instance, you do not need a separate cluster for the configuration store. If, however, you deploy a [multi-cluster](#multi-cluster-pulsar-instance) instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
+If you deploy a single-cluster instance, you do not need a separate cluster for the configuration store. If, however, you deploy a multi-cluster instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
 
 #### Single-cluster Pulsar instance
 
 If your Pulsar instance consists of just one cluster, then you can deploy a configuration store on the same machines as the local ZooKeeper quorum but run on different TCP ports.
 
-To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses to the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
+To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses. You need to use the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
 
 ```properties
 
@@ -165,13 +159,11 @@ As before, create the `myid` files for each server on `data/global-zookeeper/myi
 
 When you deploy a global Pulsar instance, with clusters distributed across different geographical regions, the configuration store serves as a highly available and strongly consistent metadata store that can tolerate failures and partitions spanning whole regions.
 
-The key here is to make sure the ZK quorum members are spread across at least 3 regions and that other regions run as observers.
+The key here is to make sure the ZK quorum members are spread across at least 3 regions, and other regions run as observers.
 
-Again, given the very low expected load on the configuration store servers, you can
-share the same hosts used for the local ZooKeeper quorum.
+Again, given the very low expected load on the configuration store servers, you can share the same hosts used for the local ZooKeeper quorum.
 
-For example, assume a Pulsar instance with the following clusters `us-west`,
-`us-east`, `us-central`, `eu-central`, `ap-south`. Also assume, each cluster has its own local ZK servers named such as the following: 
+For example, assume a Pulsar instance with the following clusters `us-west`, `us-east`, `us-central`, `eu-central`, `ap-south`. Also assume, each cluster has its own local ZK servers named such as the following: 
 
 ```
 
@@ -179,8 +171,7 @@ zk[1-3].${CLUSTER}.example.com
 
 ```
 
-In this scenario if you want to pick the quorum participants from few clusters and
-let all the others be ZK observers. For example, to form a 7 servers quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
+In this scenario, you can pick the quorum participants from a few clusters and let all the others be ZK observers. For example, to form a 7-server quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
 
 This method guarantees that writes to the configuration store are possible even if one of these regions is unreachable.
 
@@ -227,7 +218,7 @@ $ bin/pulsar-daemon start configuration-store
 
 ## Cluster metadata initialization
 
-Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **you only needs to write these metadata once**.
+Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **You only need to write this metadata once**.
 
 You can initialize this metadata using the [`initialize-cluster-metadata`](reference-cli-tools.md#pulsar-initialize-cluster-metadata) command of the [`pulsar`](reference-cli-tools.md#pulsar) CLI tool. The following is an example:
 
@@ -260,7 +251,7 @@ Make sure to run `initialize-cluster-metadata` for each cluster in your instance
 
 BookKeeper provides [persistent message storage](concepts-architecture-overview.md#persistent-storage) for Pulsar.
 
-Each Pulsar broker needs to have its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
+Each Pulsar broker needs its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
 
 ### Configure bookies
 
@@ -280,7 +271,7 @@ $ bin/pulsar-daemon start bookie
 
 You can verify that the bookie works properly using the `bookiesanity` command for the [BookKeeper shell](reference-cli-tools.md#bookkeeper-shell):
 
-```shell
+```bash
 
 $ bin/bookkeeper shell bookiesanity
 
@@ -304,7 +295,7 @@ Bookie hosts are responsible for storing message data on disk. In order for book
 Message entries written to bookies are always synced to disk before returning an acknowledgement to the Pulsar broker. To ensure low write latency, BookKeeper is
 designed to use multiple devices:
 
-* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID)s controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
+* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID) controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
 * A **ledger storage device** is where data is stored until all consumers acknowledge the message. Writes happen in the background, so write I/O is not a big concern. Reads happen sequentially most of the time and the backlog is drained only in case of consumer drain. To store large amounts of data, a typical configuration involves multiple HDDs with a RAID controller.
 
 
@@ -371,39 +362,13 @@ $ bin/pulsar broker
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions [immediately below](#service-discovery-setup).
+[Clients](getting-started-clients) connecting to Pulsar brokers need to communicate with an entire Pulsar instance using a single URL.
 
-You can also use your own service discovery system if you want. If you use your own system, you only need to satisfy just one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
+You can use your own service discovery system. If you use your own system, you only need to satisfy just one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to some active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
-> #### Service discovery already provided by many scheduling systems
+> **Service discovery already provided by many scheduling systems**
 > Many large-scale deployment systems, such as [Kubernetes](deploy-kubernetes), have service discovery systems built in. If you run Pulsar on such a system, you may not need to provide your own service discovery mechanism.
 
-
-### Service discovery setup
-
-The service discovery mechanism that included with Pulsar maintains a list of active brokers, which stored in ZooKeeper, and supports lookup using HTTP and also the [binary protocol](developing-binary-protocol) of Pulsar.
-
-To get started setting up the built-in service of discovery of Pulsar, you need to change a few parameters in the [`conf/discovery.conf`](reference-configuration.md#service-discovery) configuration file. Set the [`zookeeperServers`](reference-configuration.md#service-discovery-zookeeperServers) parameter to the ZooKeeper quorum connection string of the cluster and the [`configurationStoreServers`](reference-configuration.md#service-discovery-configurationStoreServers) setting to the [con [...]
-store](reference-terminology.md#configuration-store) quorum connection string.
-
-```properties
-
-# Zookeeper quorum connection string
-zookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
-
-# Global configuration store connection string
-configurationStoreServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
-
-```
-
-To start the discovery service:
-
-```shell
-
-$ bin/pulsar-daemon start discovery
-
-```
-
 ## Admin client and verification
 
 At this point your Pulsar instance should be ready to use. You can now configure client machines that can serve as [administrative clients](admin-api-overview) for each cluster. You can use the [`conf/client.conf`](reference-configuration.md#client) configuration file to configure admin clients.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/deploy-dcos.md b/site2/website-next/versioned_docs/version-2.4.0/deploy-dcos.md
index f5f8d1f..07f446e 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/deploy-dcos.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/deploy-dcos.md
@@ -7,18 +7,17 @@ original_id: deploy-dcos
 
 :::tip
 
-If you want to enable all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you can choose to use `apachepulsar/pulsar-all` image instead of
-`apachepulsar/pulsar` image. `apachepulsar/pulsar-all` image has already bundled [all builtin connectors](io-overview.md#working-with-connectors).
+To enable all built-in [Pulsar IO](io-overview) connectors in your Pulsar deployment, we recommend you use `apachepulsar/pulsar-all` image instead of `apachepulsar/pulsar` image; the former has already bundled [all built-in connectors](io-overview.md#working-with-connectors).
 
 :::
 
-[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system used for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool that [Mesosphere](https://mesosphere.com/) creates and maintains .
+[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool created and maintained by [Mesosphere](https://mesosphere.com/).
 
 Apache Pulsar is available as a [Marathon Application Group](https://mesosphere.github.io/marathon/docs/application-groups.html), which runs multiple applications as manageable sets.
 
 ## Prerequisites
 
-In order to run Pulsar on DC/OS, you need the following:
+You need to prepare your environment before running Pulsar on DC/OS.
 
 * DC/OS version [1.9](https://docs.mesosphere.com/1.9/) or higher
 * A [DC/OS cluster](https://docs.mesosphere.com/1.9/installing/) with at least three agent nodes
@@ -37,7 +36,7 @@ Each node in the DC/OS-managed Mesos cluster must have at least:
 * 4 GB of memory
 * 60 GB of total persistent disk
 
-Alternatively, you can change the configuration in `PulsarGroups.json` according to match your resources of DC/OS cluster.
+Alternatively, you can change the configuration in `PulsarGroups.json` accordingly to match the resources of your DC/OS cluster.
 
 ## Deploy Pulsar using the DC/OS command interface
 
@@ -56,9 +55,9 @@ This command deploys Docker container instances in three groups, which together
 * 1 [Prometheus](http://prometheus.io/) instance and 1 [Grafana](https://grafana.com/) instance
 
 
-> When you run DC/OS, a ZooKeeper cluster already runs at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
+> When you run DC/OS, a ZooKeeper cluster will be running at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
 
-After executing the `dcos` command above, click on the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications in the process of deploying.
+After executing the `dcos` command above, click the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications during the deployment.
 
 ![DC/OS command executed](/assets/dcos_command_execute.png)
 
@@ -66,15 +65,15 @@ After executing the `dcos` command above, click on the **Services** tab in the D
 
 ## The BookKeeper group
 
-To monitor the status of the BookKeeper cluster deployment, click on the **bookkeeper** group in the parent **pulsar** group.
+To monitor the status of the BookKeeper cluster deployment, click the **bookkeeper** group in the parent **pulsar** group.
 
 ![DC/OS bookkeeper status](/assets/dcos_bookkeeper_status.png)
 
-At this point, 3 [bookies](reference-terminology.md#bookie) should be shown as green, which means that the bookies have been deployed successfully and are now running.
+At this point, the status of the 3 [bookies](reference-terminology.md#bookie) is green, which means that the bookies have been deployed successfully and are running.
  
 ![DC/OS bookkeeper running](/assets/dcos_bookkeeper_run.png)
  
-You can also click into each bookie instance to get more detailed information, such as the bookie running log.
+You can also click each bookie instance to get more detailed information, such as the bookie running log.
 
 ![DC/OS bookie log](/assets/dcos_bookie_log.png)
 
@@ -82,23 +81,23 @@ To display information about the BookKeeper in ZooKeeper, you can visit [http://
 
 ![DC/OS bookkeeper in zk](/assets/dcos_bookkeeper_in_zookeeper.png)
 
-## The Pulsar broker Group
+## The Pulsar broker group
 
-Similar to the BookKeeper group above, click into the **brokers** to check the status of the Pulsar brokers.
+Similar to the BookKeeper group above, click **brokers** to check the status of the Pulsar brokers.
 
 ![DC/OS broker status](/assets/dcos_broker_status.png)
 
 ![DC/OS broker running](/assets/dcos_broker_run.png)
 
-You can also click into each broker instance to get more detailed information, such as the broker running log.
+You can also click each broker instance to get more detailed information, such as the broker running log.
 
 ![DC/OS broker log](/assets/dcos_broker_log.png)
 
-Broker cluster information in Zookeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
+Broker cluster information in ZooKeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
 
 ![DC/OS broker in zk](/assets/dcos_broker_in_zookeeper.png)
 
-## Monitor Group
+## Monitor group
 
 The **monitor** group consists of Prometheus and Grafana.
 
@@ -106,17 +105,17 @@ The **monitory** group consists of Prometheus and Grafana.
 
 ### Prometheus
 
-Click into the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
+Click the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
 
 ![DC/OS prom endpoint](/assets/dcos_prom_endpoint.png)
 
-If you click that endpoint, you can see the Prometheus dashboard. The [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets) URL display all the bookies and brokers.
+If you click that endpoint, you can see the Prometheus dashboard. All the bookies and brokers are listed on [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets).
 
 ![DC/OS prom targets](/assets/dcos_prom_targets.png)
 
 ### Grafana
 
-Click into `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
+Click `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
  
 ![DC/OS grafana endpoint](/assets/dcos_grafana_endpoint.png)
 
@@ -130,7 +129,7 @@ Now that you have a fully deployed Pulsar cluster, you can run a simple consumer
 
 ### Download and prepare the Pulsar Java tutorial
 
-You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file of the repo).
+You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file in this repo).
 
 ```bash
 
@@ -138,12 +137,13 @@ $ git clone https://github.com/streamlio/pulsar-java-tutorial
 
 ```
 
-Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java).
-The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent, which runs a broker. The client agent IP address can also replace this.
+Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) file and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file.
 
-Now, change the message number from 10 to 10000000 in main method of [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) so that it can produce more messages.
+The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent that runs a broker, and you can replace it with the client agent IP address.
 
-Now compile the project code using the command below:
+Now, you can change the message number from 10 to 10000000 in the main method in [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file to produce more messages.
+
+Then, you can compile the project code using the command below:
 
 ```bash
 
@@ -169,7 +169,7 @@ $ mvn exec:java -Dexec.mainClass="tutorial.ProducerTutorial"
 
 ```
 
-You can see the producer producing messages and the consumer consuming messages through the DC/OS GUI.
+You see that the producer is producing messages and the consumer is consuming messages through the DC/OS GUI.
 
 ![DC/OS pulsar producer](/assets/dcos_producer.png)
 
@@ -177,20 +177,20 @@ You can see the producer producing messages and the consumer consuming messages
 
 ### View Grafana metric output
 
-While the producer and consumer run, you can access running metrics information from Grafana.
+While the producer and consumer are running, you can access the running metrics from Grafana.
 
 ![DC/OS pulsar dashboard](/assets/dcos_metrics.png)
 
 
 ## Uninstall Pulsar
 
-You can shut down and uninstall the `pulsar` application from DC/OS at any time in the following two ways:
+You can shut down and uninstall the `pulsar` application from DC/OS at any time in one of the following two ways:
 
-1. Using the DC/OS GUI, you can choose **Delete** at the right end of Pulsar group.
+1. Click the three dots at the right end of Pulsar group and choose **Delete** on the DC/OS GUI.
 
    ![DC/OS pulsar uninstall](/assets/dcos_uninstall.png)
 
-2. You can use the following command:
+2. Use the command below.
 
    ```bash
    
diff --git a/site2/website-next/versioned_docs/version-2.4.0/deploy-kubernetes.md b/site2/website-next/versioned_docs/version-2.4.0/deploy-kubernetes.md
index dc7123d..4e170dc 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/deploy-kubernetes.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/deploy-kubernetes.md
@@ -2,7 +2,6 @@
 id: deploy-kubernetes
 title: Deploy Pulsar on Kubernetes
 sidebar_label: "Kubernetes"
-original_id: deploy-kubernetes
 ---
 
 To get up and running with these charts as fast as possible, in a **non-production** use case, we provide
diff --git a/site2/website-next/versioned_docs/version-2.4.0/deploy-monitoring.md b/site2/website-next/versioned_docs/version-2.4.0/deploy-monitoring.md
index 074ce3f..95ccdd6 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/deploy-monitoring.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/deploy-monitoring.md
@@ -2,7 +2,6 @@
 id: deploy-monitoring
 title: Monitor
 sidebar_label: "Monitor"
-original_id: deploy-monitoring
 ---
 
 You can use different ways to monitor a Pulsar cluster, exposing both metrics related to the usage of topics and the overall health of the individual components of the cluster.
@@ -127,17 +126,7 @@ The per-topic dashboard instructions are available at [Pulsar manager](administr
 
 You can use grafana to create dashboard driven by the data that is stored in Prometheus.
 
-When you deploy Pulsar on Kubernetes, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
-
-Enter the command below to use the dashboard manually:
-
-```shell
-
-docker run -p3000:3000 \
-        -e PROMETHEUS_URL=http://$PROMETHEUS_HOST:9090/ \
-        apachepulsar/pulsar-grafana:latest
-
-```
+When you deploy Pulsar on Kubernetes with the Pulsar Helm Chart, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
 
 The following are some Grafana dashboards examples:
 
@@ -145,4 +134,4 @@ The following are some Grafana dashboards examples:
 - [apache-pulsar-grafana-dashboard](https://github.com/streamnative/apache-pulsar-grafana-dashboard): a collection of Grafana dashboard templates for different Pulsar components running on both Kubernetes and on-premise machines.
 
 ## Alerting rules
-You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.0/developing-binary-protocol.md b/site2/website-next/versioned_docs/version-2.4.0/develop-binary-protocol.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.0/developing-binary-protocol.md
rename to site2/website-next/versioned_docs/version-2.4.0/develop-binary-protocol.md
diff --git a/site2/website-next/versioned_docs/version-2.4.0/developing-cpp.md b/site2/website-next/versioned_docs/version-2.4.0/develop-cpp.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.0/developing-cpp.md
rename to site2/website-next/versioned_docs/version-2.4.0/develop-cpp.md
diff --git a/site2/website-next/versioned_docs/version-2.4.0/develop-load-manager.md b/site2/website-next/versioned_docs/version-2.4.0/develop-load-manager.md
index 509209b..9687f30 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/develop-load-manager.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/develop-load-manager.md
@@ -2,7 +2,6 @@
 id: develop-load-manager
 title: Modular load manager
 sidebar_label: "Modular load manager"
-original_id: develop-load-manager
 ---
 
 The *modular load manager*, implemented in  [`ModularLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java), is a flexible alternative to the previously implemented load manager, [`SimpleLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java), which attempts to simplify how load  [...]
diff --git a/site2/website-next/versioned_docs/version-2.4.0/develop-tools.md b/site2/website-next/versioned_docs/version-2.4.0/develop-tools.md
index b545779..d034926 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/develop-tools.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/develop-tools.md
@@ -2,7 +2,6 @@
 id: develop-tools
 title: Simulation tools
 sidebar_label: "Simulation tools"
-original_id: develop-tools
 ---
 
 It is sometimes necessary to create a test environment and incur artificial load to observe how well load managers
diff --git a/site2/website-next/versioned_docs/version-2.4.0/functions-deploying.md b/site2/website-next/versioned_docs/version-2.4.0/functions-deploying.md
index 8ad8dbe..fabdbb7 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/functions-deploying.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/functions-deploying.md
@@ -2,260 +2,4 @@
 id: functions-deploying
 title: Deploying and managing Pulsar Functions
 sidebar_label: "Deploying functions"
-original_id: functions-deploying
 ---
-
-At the moment, there are two deployment modes available for Pulsar Functions:
-
-Mode | Description
-:----|:-----------
-Local run mode | The function runs in your local environment, for example on your laptop
-Cluster mode | The function runs *inside of* your Pulsar cluster, on the same machines as your Pulsar brokers
-
-> #### Contributing new deployment modes
-> The Pulsar Functions feature was designed, however, with extensibility in mind. Other deployment options will be available in the future. If you'd like to add a new deployment option, we recommend getting in touch with the Pulsar developer community at [dev@pulsar.apache.org](mailto:dev@pulsar.apache.org).
-
-## Requirements
-
-In order to deploy and manage Pulsar Functions, you need to have a Pulsar cluster running. There are several options for this:
-
-* You can run a [standalone cluster](getting-started-standalone) locally on your own machine
-* You can deploy a Pulsar cluster on [Kubernetes](deploy-kubernetes.md), [Amazon Web Services](deploy-aws.md), [bare metal](deploy-bare-metal.md), [DC/OS](deploy-dcos), and more
-
-If you're running a non-[standalone](reference-terminology.md#standalone) cluster, you'll need to obtain the service URL for the cluster. How you obtain the service URL will depend on how you deployed your Pulsar cluster.
-
-If you're going to deploy and trigger python user-defined functions, you should install [the pulsar python client](http://pulsar.apache.org/docs/en/client-libraries-python/) first.
-
-## Command-line interface
-
-Pulsar Functions are deployed and managed using the [`pulsar-admin functions`](reference-pulsar-admin.md#functions) interface, which contains commands such as [`create`](reference-pulsar-admin.md#functions-create) for deploying functions in [cluster mode](#cluster-mode), [`trigger`](reference-pulsar-admin.md#trigger) for [triggering](#triggering-pulsar-functions) functions, [`list`](reference-pulsar-admin.md#list-2) for listing deployed functions, and several others.
-
-### Fully Qualified Function Name (FQFN)
-
-Each Pulsar Function has a **Fully Qualified Function Name** (FQFN) that consists of three elements: the function's tenant, namespace, and function name. FQFN's look like this:
-
-```http
-
-tenant/namespace/name
-
-```
-
-FQFNs enable you to, for example, create multiple functions with the same name provided that they're in different namespaces.
-
-### Default arguments
-
-When managing Pulsar Functions, you'll need to specify a variety of information about those functions, including tenant, namespace, input and output topics, etc. There are some parameters, however, that have default values that will be supplied if omitted. The table below lists the defaults:
-
-Parameter | Default
-:---------|:-------
-Function name | Whichever value is specified for the class name (minus org, library, etc.). The flag `--classname org.example.MyFunction`, for example, would give the function a name of `MyFunction`.
-Tenant | Derived from the input topics' names. If the input topics are under the `marketing` tenant---i.e. the topic names have the form `persistent://marketing/{namespace}/{topicName}`---then the tenant will be `marketing`.
-Namespace | Derived from the input topics' names. If the input topics are under the `asia` namespace under the `marketing` tenant---i.e. the topic names have the form `persistent://marketing/asia/{topicName}`, then the namespace will be `asia`.
-Output topic | `{input topic}-{function name}-output`. A function with an input topic name of `incoming` and a function name of `exclamation`, for example, would have an output topic of `incoming-exclamation-output`.
-Subscription type | For at-least-once and at-most-once [processing guarantees](functions-guarantees), the [`SHARED`](concepts-messaging.md#shared) is applied by default; for effectively-once guarantees, [`FAILOVER`](concepts-messaging.md#failover) is applied
-Processing guarantees | [`ATLEAST_ONCE`](functions-guarantees)
-Pulsar service URL | `pulsar://localhost:6650`
-
-#### Example use of defaults
-
-Take this `create` command:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --jar my-pulsar-functions.jar \
-  --classname org.example.MyFunction \
-  --inputs my-function-input-topic1,my-function-input-topic2
-
-```
-
-The created function would have default values supplied for the function name (`MyFunction`), tenant (`public`), namespace (`default`), subscription type (`SHARED`), processing guarantees (`ATLEAST_ONCE`), and Pulsar service URL (`pulsar://localhost:6650`).
-
-## Local run mode
-
-If you run a Pulsar Function in **local run** mode, it will run on the machine from which the command is run (this could be your laptop, an [AWS EC2](https://aws.amazon.com/ec2/) instance, etc.). Here's an example [`localrun`](reference-pulsar-admin.md#localrun) command:
-
-```bash
-
-$ bin/pulsar-admin functions localrun \
-  --py myfunc.py \
-  --classname myfunc.SomeFunction \
-  --inputs persistent://public/default/input-1 \
-  --output persistent://public/default/output-1
-
-```
-
-By default, the function will connect to a Pulsar cluster running on the same machine, via a local [broker](reference-terminology.md#broker) service URL of `pulsar://localhost:6650`. If you'd like to use local run mode to run a function but connect it to a non-local Pulsar cluster, you can specify a different broker URL using the `--brokerServiceUrl` flag. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions localrun \
-  --broker-service-url pulsar://my-cluster-host:6650 \
-  # Other function parameters
-
-```
-
-## Cluster mode
-
-When you run a Pulsar Function in **cluster mode**, the function code will be uploaded to a Pulsar broker and run *alongside the broker* rather than in your [local environment](#local-run-mode). You can run a function in cluster mode using the [`create`](reference-pulsar-admin.md#create-1) command. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --py myfunc.py \
-  --classname myfunc.SomeFunction \
-  --inputs persistent://public/default/input-1 \
-  --output persistent://public/default/output-1
-
-```
-
-### Updating cluster mode functions
-
-You can use the [`update`](reference-pulsar-admin.md#update-1) command to update a Pulsar Function running in cluster mode. This command, for example, would update the function created in the section [above](#cluster-mode):
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --py myfunc.py \
-  --classname myfunc.SomeFunction \
-  --inputs persistent://public/default/new-input-topic \
-  --output persistent://public/default/new-output-topic
-
-```
-
-### Parallelism
-
-Pulsar Functions run as processes called **instances**. When you run a Pulsar Function, it runs as a single instance by default (and in [local run mode](#local-run-mode) you can *only* run a single instance of a function).
-
-You can also specify the *parallelism* of a function, i.e. the number of instances to run, when you create the function. You can set the parallelism factor using the `--parallelism` flag of the [`create`](reference-pulsar-admin.md#functions-create) command. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --parallelism 3 \
-  # Other function info
-
-```
-
-You can adjust the parallelism of an already created function using the [`update`](reference-pulsar-admin.md#update-1) interface.
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --parallelism 5 \
-  # Other function
-
-```
-
-If you're specifying a function's configuration via YAML, use the `parallelism` parameter. Here's an example config file:
-
-```yaml
-
-# function-config.yaml
-parallelism: 3
-inputs:
-- persistent://public/default/input-1
-output: persistent://public/default/output-1
-# other parameters
-
-```
-
-And here's the corresponding update command:
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --function-config-file function-config.yaml
-
-```
-
-### Function instance resources
-
-When you run Pulsar Functions in [cluster run](#cluster-mode) mode, you can specify the resources that are assigned to each function [instance](#parallelism):
-
-Resource | Specified as... | Runtimes
-:--------|:----------------|:--------
-CPU | The number of cores | Docker (coming soon)
-RAM | The number of bytes | Process, Docker
-Disk space | The number of bytes | Docker
-
-Here's an example function creation command that allocates 8 cores, 8 GB of RAM, and 10 GB of disk space to a function:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --jar target/my-functions.jar \
-  --classname org.example.functions.MyFunction \
-  --cpu 8 \
-  --ram 8589934592 \
-  --disk 10737418240
-
-```
-
-> #### Resources are *per instance*
-> The resources that you apply to a given Pulsar Function are applied to each [instance](#parallelism) of the function. If you apply 8 GB of RAM to a function with a parallelism of 5, for example, then you are applying 40 GB of RAM total for the function. You should always make sure to factor parallelism---i.e. the number of instances---into your resource calculations
-
-## Triggering Pulsar Functions
-
-If a Pulsar Function is running in [cluster mode](#cluster-mode), you can **trigger** it at any time using the command line. Triggering a function means that you send a message with a specific value to the function and get the function's output (if any) via the command line.
-
-> Triggering a function is ultimately no different from invoking a function by producing a message on one of the function's input topics. The [`pulsar-admin functions trigger`](reference-pulsar-admin.md#trigger) command is essentially a convenient mechanism for sending messages to functions without needing to use the [`pulsar-client`](reference-cli-tools.md#pulsar-client) tool or a language-specific client library.
-
-To show an example of function triggering, let's start with a simple [Python function](functions-api.md#functions-for-python) that returns a simple string based on the input:
-
-```python
-
-# myfunc.py
-def process(input):
-    return "This function has been triggered with a value of {0}".format(input)
-
-```
-
-Let's run that function in [local run mode](functions-deploying.md#local-run-mode):
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --tenant public \
-  --namespace default \
-  --name myfunc \
-  --py myfunc.py \
-  --classname myfunc \
-  --inputs persistent://public/default/in \
-  --output persistent://public/default/out
-
-```
-
-Now let's make a consumer listen on the output topic for messages coming from the `myfunc` function using the [`pulsar-client consume`](reference-cli-tools.md#consume) command:
-
-```bash
-
-$ bin/pulsar-client consume persistent://public/default/out \
-  --subscription-name my-subscription
-  --num-messages 0 # Listen indefinitely
-
-```
-
-Now let's trigger that function:
-
-```bash
-
-$ bin/pulsar-admin functions trigger \
-  --tenant public \
-  --namespace default \
-  --name myfunc \
-  --trigger-value "hello world"
-
-```
-
-The consumer listening on the output topic should then produce this in its logs:
-
-```
-
------ got message -----
-This function has been triggered with a value of hello world
-
-```
-
-> #### Topic info not required
-> In the `trigger` command above, you may have noticed that you only need to specify basic information about the function (tenant, namespace, and name). To trigger the function, you didn't need to know the function's input topic(s).
diff --git a/site2/website-next/versioned_docs/version-2.4.0/functions-guarantees.md b/site2/website-next/versioned_docs/version-2.4.0/functions-guarantees.md
index d9b1438..aefa8d3 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/functions-guarantees.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/functions-guarantees.md
@@ -2,46 +2,4 @@
 id: functions-guarantees
 title: Processing guarantees
 sidebar_label: "Processing guarantees"
-original_id: functions-guarantees
 ---
-
-Pulsar Functions provides three different messaging semantics that you can apply to any function:
-
-Delivery semantics | Description
-:------------------|:-------
-**At-most-once** delivery | Each message that is sent to the function will most likely be processed but also may not be (hence the "at most")
-**At-least-once** delivery | Each message that is sent to the function could be processed more than once (hence the "at least")
-**Effectively-once** delivery | Each message that is sent to the function will have one output associated with it
-
-## Applying processing guarantees to a function
-
-You can set the processing guarantees for a Pulsar Function when you create the Function. This [`pulsar-function create`](reference-pulsar-admin.md#create-1) command, for example, would apply effectively-once guarantees to the Function:
-
-```bash
-
-$ bin/pulsar-admin functions create \
-  --processing-guarantees EFFECTIVELY_ONCE \
-  # Other function configs
-
-```
-
-The available options are:
-
-* `ATMOST_ONCE`
-* `ATLEAST_ONCE`
-* `EFFECTIVELY_ONCE`
-
-> By default, Pulsar Functions provide at-least-once delivery guarantees. So if you create a function without supplying a value for the `--processingGuarantees` flag, then the function will provide at-least-once guarantees.
-
-## Updating the processing guarantees of a function
-
-You can change the processing guarantees applied to a function once it's already been created using the [`update`](reference-pulsar-admin.md#update-1) command. Here's an example:
-
-```bash
-
-$ bin/pulsar-admin functions update \
-  --processing-guarantees ATMOST_ONCE \
-  # Other function configs
-
-```
-
diff --git a/site2/website-next/versioned_docs/version-2.4.0/functions-metrics.md b/site2/website-next/versioned_docs/version-2.4.0/functions-metrics.md
index 8add669..e76c556 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/functions-metrics.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/functions-metrics.md
@@ -2,6 +2,5 @@
 id: functions-metrics
 title: Metrics for Pulsar Functions
 sidebar_label: "Metrics"
-original_id: functions-metrics
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/io-cdc.md b/site2/website-next/versioned_docs/version-2.4.0/io-cdc.md
index 20f16ae..df37397 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/io-cdc.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/io-cdc.md
@@ -2,7 +2,6 @@
 id: io-cdc
 title: CDC connector
 sidebar_label: "CDC connector"
-original_id: io-cdc
 ---
 
 CDC source connectors capture log changes of databases (such as MySQL, MongoDB, and PostgreSQL) into Pulsar.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/io-connectors.md b/site2/website-next/versioned_docs/version-2.4.0/io-connectors.md
index 8db368e..9e8ede9 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/io-connectors.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/io-connectors.md
@@ -2,7 +2,6 @@
 id: io-connectors
 title: Built-in connector
 sidebar_label: "Built-in connector"
-original_id: io-connectors
 ---
 
 Pulsar distribution includes a set of common connectors that have been packaged and tested with the rest of Apache Pulsar. These connectors import and export data from some of the most commonly used data systems. 
@@ -46,6 +45,23 @@ Pulsar has various source connectors, which are sorted alphabetically as below.
 
 * [Java class](https://github.com/apache/pulsar/blob/master/pulsar-io/debezium/mongodb/src/main/java/org/apache/pulsar/io/debezium/mongodb/DebeziumMongoDbSource.java)
 
+### Debezium Oracle
+
+* [Configuration](io-debezium-source.md#configuration)
+
+* [Example](io-debezium-source.md#example-of-oracle)
+
+* [Java class](https://github.com/apache/pulsar/blob/master/pulsar-io/debezium/oracle/src/main/java/org/apache/pulsar/io/debezium/oracle/DebeziumOracleSource.java)
+
+### Debezium Microsoft SQL Server
+
+* [Configuration](io-debezium-source.md#configuration)
+
+* [Example](io-debezium-source.md#example-of-microsoft-sql)
+
+* [Java class](https://github.com/apache/pulsar/blob/master/pulsar-io/debezium/mssql/src/main/java/org/apache/pulsar/io/debezium/mssql/DebeziumMsSqlSource.java)
+
+  
 ### DynamoDB
 
 * [Configuration](io-dynamodb-source.md#configuration)
diff --git a/site2/website-next/versioned_docs/version-2.4.0/io-develop.md b/site2/website-next/versioned_docs/version-2.4.0/io-develop.md
index 2328f37..bbd6501 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/io-develop.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/io-develop.md
@@ -2,7 +2,6 @@
 id: io-develop
 title: How to develop Pulsar connectors
 sidebar_label: "Develop"
-original_id: io-develop
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.0/io-overview.md b/site2/website-next/versioned_docs/version-2.4.0/io-overview.md
index 68960a8..810de78 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/io-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/io-overview.md
@@ -2,7 +2,6 @@
 id: io-overview
 title: Pulsar connector overview
 sidebar_label: "Overview"
-original_id: io-overview
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.0/pulsar-2.0.md b/site2/website-next/versioned_docs/version-2.4.0/pulsar-2.0.md
index 11c5e66..560c8c1 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/pulsar-2.0.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/pulsar-2.0.md
@@ -2,7 +2,6 @@
 id: pulsar-2.0
 title: Pulsar 2.0
 sidebar_label: "Pulsar 2.0"
-original_id: pulsar-2.0
 ---
 
 Pulsar 2.0 is a major new release for Pulsar that brings some bold changes to the platform, including [simplified topic names](#topic-names), the addition of the [Pulsar Functions](functions-overview) feature, some terminology changes, and more.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/reference-pulsar-admin.md b/site2/website-next/versioned_docs/version-2.4.0/pulsar-admin.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.0/reference-pulsar-admin.md
rename to site2/website-next/versioned_docs/version-2.4.0/pulsar-admin.md
diff --git a/site2/website-next/versioned_docs/version-2.4.0/reference-cli-tools.md b/site2/website-next/versioned_docs/version-2.4.0/reference-cli-tools.md
index 38f52d7..aa12b21 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/reference-cli-tools.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/reference-cli-tools.md
@@ -750,7 +750,7 @@ $ bookkeeper command
 ```
 
 Commands
-* `auto-recovery`
+* `autorecovery`
 * `bookie`
 * `localbookie`
 * `upgrade`
@@ -773,13 +773,13 @@ The table below lists the environment variables that you can use to configure th
 
 
 ### `auto-recovery`
-Runs an auto-recovery service daemon
+Runs an auto-recovery service
 
 Usage
 
 ```bash
 
-$ bookkeeper auto-recovery options
+$ bookkeeper autorecovery options
 
 ```
 
@@ -787,7 +787,7 @@ Options
 
 |Flag|Description|Default|
 |---|---|---|
-|`-c`, `--conf`|Configuration for the auto-recovery daemon||
+|`-c`, `--conf`|Configuration for the auto-recovery||
 
 
 ### `bookie`
@@ -805,7 +805,7 @@ Options
 
 |Flag|Description|Default|
 |---|---|---|
-|`-c`, `--conf`|Configuration for the auto-recovery daemon||
+|`-c`, `--conf`|Configuration for the auto-recovery||
 |-readOnly|Force start a read-only bookie server|false|
 |-withAutoRecovery|Start auto-recovery service bookie server|false|
 
@@ -836,7 +836,7 @@ Options
 
 |Flag|Description|Default|
 |---|---|---|
-|`-c`, `--conf`|Configuration for the auto-recovery daemon||
+|`-c`, `--conf`|Configuration for the auto-recovery||
 |`-u`, `--upgrade`|Upgrade the bookie’s directories||
 
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/reference-terminology.md b/site2/website-next/versioned_docs/version-2.4.0/reference-terminology.md
index d0e7368..ebc114d 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/reference-terminology.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/reference-terminology.md
@@ -2,7 +2,6 @@
 id: reference-terminology
 title: Pulsar Terminology
 sidebar_label: "Terminology"
-original_id: reference-terminology
 ---
 
 Here is a glossary of terms related to Apache Pulsar:
diff --git a/site2/website-next/versioned_docs/version-2.4.0/security-athenz.md b/site2/website-next/versioned_docs/version-2.4.0/security-athenz.md
index 947c3f4..ba27ba4 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/security-athenz.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/security-athenz.md
@@ -2,7 +2,6 @@
 id: security-athenz
 title: Authentication using Athenz
 sidebar_label: "Authentication using Athenz"
-original_id: security-athenz
 ---
 
 [Athenz](https://github.com/AthenZ/athenz) is a role-based authentication/authorization system. In Pulsar, you can use Athenz role tokens (also known as *z-tokens*) to establish the identify of the client.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/security-authorization.md b/site2/website-next/versioned_docs/version-2.4.0/security-authorization.md
index e678587..b1003d2 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/security-authorization.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/security-authorization.md
@@ -2,7 +2,6 @@
 id: security-authorization
 title: Authentication and authorization in Pulsar
 sidebar_label: "Authorization and ACLs"
-original_id: security-authorization
 ---
 
 
diff --git a/site2/website-next/versioned_docs/version-2.4.0/security-encryption.md b/site2/website-next/versioned_docs/version-2.4.0/security-encryption.md
index cc43082..90d0dbe 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/security-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/security-encryption.md
@@ -2,7 +2,6 @@
 id: security-encryption
 title: Pulsar Encryption
 sidebar_label: "End-to-End Encryption"
-original_id: security-encryption
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.0/security-extending.md b/site2/website-next/versioned_docs/version-2.4.0/security-extending.md
index c088e3a..dd0030e 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/security-extending.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/security-extending.md
@@ -2,7 +2,6 @@
 id: security-extending
 title: Extending Authentication and Authorization in Pulsar
 sidebar_label: "Extending"
-original_id: security-extending
 ---
 
 Pulsar provides a way to use custom authentication and authorization mechanisms.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/sql-deployment-configurations.md b/site2/website-next/versioned_docs/version-2.4.0/sql-deployment-configurations.md
index 6c6fd87..9e7ff5a 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/sql-deployment-configurations.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/sql-deployment-configurations.md
@@ -2,7 +2,6 @@
 id: sql-deployment-configurations
 title: Pulsar SQL configuration and deployment
 sidebar_label: "Configuration and deployment"
-original_id: sql-deployment-configurations
 ---
 
 You can configure Presto Pulsar connector and deploy a cluster with the following instruction.
@@ -27,6 +26,84 @@ pulsar.entry-read-batch-size=100
 # default number of splits to use per query
 pulsar.target-num-splits=4
 
+# max size of one batch message (default value is 5MB)
+pulsar.max-message-size=5242880
+
+# number of split used when querying data from pulsar
+pulsar.target-num-splits=2
+
+# size of queue to buffer entry read from pulsar
+pulsar.max-split-entry-queue-size=1000
+
+# size of queue to buffer message extract from entries
+pulsar.max-split-message-queue-size=10000
+
+# status provider to record connector metrics
+pulsar.stats-provider=org.apache.bookkeeper.stats.NullStatsProvider
+
+# config in map format for stats provider e.g. {"key1":"val1","key2":"val2"}
+pulsar.stats-provider-configs={}
+
+# whether to rewrite Pulsar's default topic delimiter '/'
+pulsar.namespace-delimiter-rewrite-enable=false
+
+# delimiter used to rewrite Pulsar's default delimiter '/', use if default is causing incompatibility with other system like Superset
+pulsar.rewrite-namespace-delimiter="/"
+
+# maximum number of thread pool size for ledger offloader.
+pulsar.managed-ledger-offload-max-threads=2
+
+# driver used to offload or read cold data to or from long-term storage
+pulsar.managed-ledger-offload-driver=null
+
+# directory to load offloaders nar file.
+pulsar.offloaders-directory="./offloaders"
+
+# properties and configurations related to specific offloader implementation as map e.g. {"key1":"val1","key2":"val2"}
+pulsar.offloader-properties={}
+
+# authentication plugin used to authenticate to Pulsar cluster
+pulsar.auth-plugin=null
+
+# authentication parameter used to authenticate to the Pulsar cluster as a string e.g. "key1:val1,key2:val2".
+pulsar.auth-params=null
+
+# whether the Pulsar client accept an untrusted TLS certificate from broker
+pulsar.tls-allow-insecure-connection=null
+
+# whether to allow hostname verification when a client connects to broker over TLS.
+pulsar.tls-hostname-verification-enable=null
+
+# path for the trusted TLS certificate file of Pulsar broker
+pulsar.tls-trust-cert-file-path=null
+
+# set the threshold for BookKeeper request throttle, default is disabled
+pulsar.bookkeeper-throttle-value=0
+
+# set the number of IO thread
+pulsar.bookkeeper-num-io-threads=2 * Runtime.getRuntime().availableProcessors()
+
+# set the number of worker thread
+pulsar.bookkeeper-num-worker-threads=Runtime.getRuntime().availableProcessors()
+
+# whether to use BookKeeper V2 wire protocol
+pulsar.bookkeeper-use-v2-protocol=true
+
+# interval to check the need for sending an explicit LAC, default is disabled
+pulsar.bookkeeper-explicit-interval=0
+
+# size for managed ledger entry cache (in MB).
+pulsar.managed-ledger-cache-size-MB=0
+
+# number of threads to be used for managed ledger tasks dispatching
+pulsar.managed-ledger-num-worker-threads=Runtime.getRuntime().availableProcessors()
+
+# number of threads to be used for managed ledger scheduled tasks
+pulsar.managed-ledger-num-scheduler-threads=Runtime.getRuntime().availableProcessors()
+
+# directory used to store extraction NAR file
+pulsar.nar-extraction-directory=System.getProperty("java.io.tmpdir")
+
 ```
 
 You can connect Presto to a Pulsar cluster with multiple hosts. To configure multiple hosts for brokers, add multiple URLs to `pulsar.web-service-url`. To configure multiple hosts for ZooKeeper, add multiple URIs to `pulsar.zookeeper-uri`. The following is an example.
@@ -38,6 +115,21 @@ pulsar.zookeeper-uri=localhost1,localhost2:2181
 
 ```
 
+A frequently asked question is why the latest message is not showing up when querying with Pulsar SQL.
+It is not a bug; the behavior is controlled by a setting: by default, the BookKeeper LAC (Last Add Confirmed) only advances when subsequent entries are added.
+If no subsequent entries are added, the last entry written is not visible to readers until the ledger is closed.
+This is not a problem for Pulsar, which uses managed ledgers, but Pulsar SQL reads directly from the BookKeeper ledger.
+You can add the following settings to change this behavior:
+In Broker config, set
+bookkeeperExplicitLacIntervalInMills > 0
+bookkeeperUseV2WireProtocol=false
+
+And in Presto config, set
+pulsar.bookkeeper-explicit-interval > 0
+pulsar.bookkeeper-use-v2-protocol=false
+
+However, keep in mind that using the BookKeeper V3 protocol introduces additional GC overhead to BookKeeper because it uses Protobuf.
+
 ## Query data from existing Presto clusters
 
 If you already have a Presto cluster, you can copy the Presto Pulsar connector plugin to your existing cluster. Download the archived plugin package with the following command.
diff --git a/site2/website-next/versioned_docs/version-2.4.0/sql-overview.md b/site2/website-next/versioned_docs/version-2.4.0/sql-overview.md
index 4a4d5f0..2f827f4 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/sql-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/sql-overview.md
@@ -2,7 +2,6 @@
 id: sql-overview
 title: Pulsar SQL Overview
 sidebar_label: "Overview"
-original_id: sql-overview
 ---
 
 Apache Pulsar is used to store streams of event data, and the event data is structured with predefined fields. With the implementation of the [Schema Registry](schema-get-started), you can store structured data in Pulsar and query the data by using [Trino (formerly Presto SQL)](https://trino.io/).
diff --git a/site2/website-next/versioned_docs/version-2.4.0/standalone-docker.md b/site2/website-next/versioned_docs/version-2.4.0/standalone-docker.md
index 05ac2a1..7ee20c2 100644
--- a/site2/website-next/versioned_docs/version-2.4.0/standalone-docker.md
+++ b/site2/website-next/versioned_docs/version-2.4.0/standalone-docker.md
@@ -2,14 +2,11 @@
 id: standalone-docker
 title: Set up a standalone Pulsar in Docker
 sidebar_label: "Run Pulsar in Docker"
-original_id: standalone-docker
 ---
 
-For local development and testing, you can run Pulsar in standalone
-mode on your own machine within a Docker container.
+For local development and testing, you can run Pulsar in standalone mode on your own machine within a Docker container. 
 
-If you have not installed Docker, download the [Community edition](https://www.docker.com/community-edition)
-and follow the instructions for your OS.
+If you have not installed Docker, download the [Community edition](https://www.docker.com/community-edition) and follow the instructions for your OS.
 
 ## Start Pulsar in Docker
 
@@ -17,13 +14,7 @@ and follow the instructions for your OS.
 
   ```shell
   
-  $ docker run -it \
-  -p 6650:6650 \
-  -p 8080:8080 \
-  --mount source=pulsardata,target=/pulsar/data \
-  --mount source=pulsarconf,target=/pulsar/conf \
-  apachepulsar/pulsar:@pulsar:version@ \
-  bin/pulsar standalone
+  $ docker run -it -p 6650:6650  -p 8080:8080 --mount source=pulsardata,target=/pulsar/data --mount source=pulsarconf,target=/pulsar/conf apachepulsar/pulsar:@pulsar:version@ bin/pulsar standalone
   
   ```
 
@@ -36,8 +27,9 @@ If you start Pulsar successfully, you will see `INFO`-level log messages like th
 
 ```
 
-2017-08-09 22:34:04,030 - INFO  - [main:WebService@213] - Web Service started at http://127.0.0.1:8080
-2017-08-09 22:34:04,038 - INFO  - [main:PulsarService@335] - messaging service is ready, bootstrap service on port=8080, broker url=pulsar://127.0.0.1:6650, cluster=standalone, configs=org.apache.pulsar.broker.ServiceConfiguration@4db60246
+08:18:30.970 [main] INFO  org.apache.pulsar.broker.web.WebService - HTTP Service started at http://0.0.0.0:8080
+...
+07:53:37.322 [main] INFO  org.apache.pulsar.broker.PulsarService - messaging service is ready, bootstrap service port = 8080, broker url= pulsar://localhost:6650, cluster=standalone, configs=org.apache.pulsar.broker.ServiceConfiguration@98b63c1
 ...
 
 ```
@@ -60,7 +52,7 @@ use one of these root URLs to interact with your cluster:
 * `pulsar://localhost:6650`
 * `http://localhost:8080`
 
-The following example will guide you get started with Pulsar quickly by using the [Python](client-libraries-python)
+The following example will help you get started with Pulsar quickly by using the [Python](client-libraries-python)
 client API.
 
 Install the Pulsar Python client library directly from [PyPI](https://pypi.org/project/pulsar-client/):
@@ -128,51 +120,93 @@ The output is something like this:
 ```json
 
 {
-  "averageMsgSize": 0.0,
-  "msgRateIn": 0.0,
-  "msgRateOut": 0.0,
-  "msgThroughputIn": 0.0,
-  "msgThroughputOut": 0.0,
-  "publishers": [
-    {
-      "address": "/172.17.0.1:35048",
-      "averageMsgSize": 0.0,
-      "clientVersion": "1.19.0-incubating",
-      "connectedSince": "2017-08-09 20:59:34.621+0000",
-      "msgRateIn": 0.0,
-      "msgThroughputIn": 0.0,
-      "producerId": 0,
-      "producerName": "standalone-0-1"
-    }
-  ],
-  "replication": {},
-  "storageSize": 16,
-  "subscriptions": {
-    "my-sub": {
-      "blockedSubscriptionOnUnackedMsgs": false,
-      "consumers": [
+    "msgRateIn": 0.0,
+    "msgThroughputIn": 0.0,
+    "msgRateOut": 1.8332950480217471,
+    "msgThroughputOut": 91.33142602871978,
+    "bytesInCounter": 7097,
+    "msgInCounter": 143,
+    "bytesOutCounter": 6607,
+    "msgOutCounter": 133,
+    "averageMsgSize": 0.0,
+    "msgChunkPublished": false,
+    "storageSize": 7097,
+    "backlogSize": 0,
+    "offloadedStorageSize": 0,
+    "publishers": [
         {
-          "address": "/172.17.0.1:35064",
-          "availablePermits": 996,
-          "blockedConsumerOnUnackedMsgs": false,
-          "clientVersion": "1.19.0-incubating",
-          "connectedSince": "2017-08-09 21:05:39.222+0000",
-          "consumerName": "166111",
-          "msgRateOut": 0.0,
-          "msgRateRedeliver": 0.0,
-          "msgThroughputOut": 0.0,
-          "unackedMessages": 0
+            "accessMode": "Shared",
+            "msgRateIn": 0.0,
+            "msgThroughputIn": 0.0,
+            "averageMsgSize": 0.0,
+            "chunkedMessageRate": 0.0,
+            "producerId": 0,
+            "metadata": {},
+            "address": "/127.0.0.1:35604",
+            "connectedSince": "2021-07-04T09:05:43.04788Z",
+            "clientVersion": "2.8.0",
+            "producerName": "standalone-2-5"
+        }
+    ],
+    "waitingPublishers": 0,
+    "subscriptions": {
+        "my-sub": {
+            "msgRateOut": 1.8332950480217471,
+            "msgThroughputOut": 91.33142602871978,
+            "bytesOutCounter": 6607,
+            "msgOutCounter": 133,
+            "msgRateRedeliver": 0.0,
+            "chunkedMessageRate": 0,
+            "msgBacklog": 0,
+            "backlogSize": 0,
+            "msgBacklogNoDelayed": 0,
+            "blockedSubscriptionOnUnackedMsgs": false,
+            "msgDelayed": 0,
+            "unackedMessages": 0,
+            "type": "Exclusive",
+            "activeConsumerName": "3c544f1daa",
+            "msgRateExpired": 0.0,
+            "totalMsgExpired": 0,
+            "lastExpireTimestamp": 0,
+            "lastConsumedFlowTimestamp": 1625389101290,
+            "lastConsumedTimestamp": 1625389546070,
+            "lastAckedTimestamp": 1625389546162,
+            "lastMarkDeleteAdvancedTimestamp": 1625389546163,
+            "consumers": [
+                {
+                    "msgRateOut": 1.8332950480217471,
+                    "msgThroughputOut": 91.33142602871978,
+                    "bytesOutCounter": 6607,
+                    "msgOutCounter": 133,
+                    "msgRateRedeliver": 0.0,
+                    "chunkedMessageRate": 0.0,
+                    "consumerName": "3c544f1daa",
+                    "availablePermits": 867,
+                    "unackedMessages": 0,
+                    "avgMessagesPerEntry": 6,
+                    "blockedConsumerOnUnackedMsgs": false,
+                    "lastAckedTimestamp": 1625389546162,
+                    "lastConsumedTimestamp": 1625389546070,
+                    "metadata": {},
+                    "address": "/127.0.0.1:35472",
+                    "connectedSince": "2021-07-04T08:58:21.287682Z",
+                    "clientVersion": "2.8.0"
+                }
+            ],
+            "isDurable": true,
+            "isReplicated": false,
+            "allowOutOfOrderDelivery": false,
+            "consumersAfterMarkDeletePosition": {},
+            "nonContiguousDeletedMessagesRanges": 0,
+            "nonContiguousDeletedMessagesRangesSerializedSize": 0,
+            "durable": true,
+            "replicated": false
         }
-      ],
-      "msgBacklog": 0,
-      "msgRateExpired": 0.0,
-      "msgRateOut": 0.0,
-      "msgRateRedeliver": 0.0,
-      "msgThroughputOut": 0.0,
-      "type": "Exclusive",
-      "unackedMessages": 0
-    }
-  }
+    },
+    "replication": {},
+    "deduplicationStatus": "Disabled",
+    "nonContiguousDeletedMessagesRanges": 0,
+    "nonContiguousDeletedMessagesRangesSerializedSize": 0
 }
 
 ```
diff --git a/site2/website-next/versioned_docs/version-2.4.0/getting-started-standalone.md b/site2/website-next/versioned_docs/version-2.4.0/standalone.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.0/getting-started-standalone.md
rename to site2/website-next/versioned_docs/version-2.4.0/standalone.md
diff --git a/site2/website-next/versioned_docs/version-2.4.1/adaptors-kafka.md b/site2/website-next/versioned_docs/version-2.4.1/adaptors-kafka.md
index ad0d886..27382e9 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/adaptors-kafka.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/adaptors-kafka.md
@@ -2,7 +2,6 @@
 id: adaptors-kafka
 title: Pulsar adaptor for Apache Kafka
 sidebar_label: "Kafka client wrapper"
-original_id: adaptors-kafka
 ---
 
 
@@ -261,6 +260,7 @@ You can configure Pulsar authentication provider directly from the Kafka propert
 | [`pulsar.producer.batching.enabled`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBatchingEnabled-boolean-) | `true` | Control whether automatic batching of messages is enabled for the producer. |
 | [`pulsar.producer.batching.max.messages`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBatchingMaxMessages-int-) | `1000` | The maximum number of messages in a batch. |
 | [`pulsar.block.if.producer.queue.full`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBlockIfQueueFull-boolean-) | | Specify the block producer if queue  is full. |
+| [`pulsar.crypto.reader.factory.class.name`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setCryptoKeyReader-org.apache.pulsar.client.api.CryptoKeyReader-) | | Specify the CryptoReader-Factory(`CryptoKeyReaderFactory`) classname which allows producer to create CryptoKeyReader. |
 
 
 ### Pulsar consumer Properties
@@ -272,3 +272,4 @@ You can configure Pulsar authentication provider directly from the Kafka propert
 | [`pulsar.consumer.acknowledgments.group.time.millis`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#acknowledgmentGroupTime-long-java.util.concurrent.TimeUnit-) | 100 | Set the maximum amount of group time for consumers to send the acknowledgments to the broker. |
 | [`pulsar.consumer.total.receiver.queue.size.across.partitions`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerConfiguration.html#setMaxTotalReceiverQueueSizeAcrossPartitions-int-) | 50000 | Set the maximum size of the total receiver queue across partitions. |
 | [`pulsar.consumer.subscription.topics.mode`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#subscriptionTopicsMode-Mode-) | PersistentOnly | Set the subscription topic mode for consumers. |
+| [`pulsar.crypto.reader.factory.class.name`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setCryptoKeyReader-org.apache.pulsar.client.api.CryptoKeyReader-) | | Specify the CryptoReader-Factory(`CryptoKeyReaderFactory`) classname which allows consumer to create CryptoKeyReader. |
diff --git a/site2/website-next/versioned_docs/version-2.4.1/adaptors-spark.md b/site2/website-next/versioned_docs/version-2.4.1/adaptors-spark.md
index e14f13b..afa5a7e 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/adaptors-spark.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/adaptors-spark.md
@@ -2,7 +2,6 @@
 id: adaptors-spark
 title: Pulsar adaptor for Apache Spark
 sidebar_label: "Apache Spark"
-original_id: adaptors-spark
 ---
 
 ## Spark Streaming receiver
diff --git a/site2/website-next/versioned_docs/version-2.4.1/adaptors-storm.md b/site2/website-next/versioned_docs/version-2.4.1/adaptors-storm.md
index 76d5071..9df9076 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/adaptors-storm.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/adaptors-storm.md
@@ -2,7 +2,6 @@
 id: adaptors-storm
 title: Pulsar adaptor for Apache Storm
 sidebar_label: "Apache Storm"
-original_id: adaptors-storm
 ---
 
 Pulsar Storm is an adaptor for integrating with [Apache Storm](http://storm.apache.org/) topologies. It provides core Storm implementations for sending and receiving data.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/admin-api-brokers.md b/site2/website-next/versioned_docs/version-2.4.1/admin-api-brokers.md
index dbac453..10a90ca 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/admin-api-brokers.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/admin-api-brokers.md
@@ -2,7 +2,6 @@
 id: admin-api-brokers
 title: Managing Brokers
 sidebar_label: "Brokers"
-original_id: admin-api-brokers
 ---
 
 import Tabs from '@theme/Tabs';
@@ -26,9 +25,9 @@ Pulsar brokers consist of two components:
 
 [Brokers](reference-terminology.md#broker) can be managed via:
 
-* The [`brokers`](reference-pulsar-admin.md#brokers) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `brokers` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/brokers` endpoint of the admin {@inject: rest:REST:/} API
-* The `brokers` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin.html} object in the [Java API](client-libraries-java)
+* The `brokers` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 In addition to being configurable when you start them up, brokers can also be [dynamically configured](#dynamic-broker-configuration).
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/admin-api-clusters.md b/site2/website-next/versioned_docs/version-2.4.1/admin-api-clusters.md
index 972c7e1..8687ae6 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/admin-api-clusters.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/admin-api-clusters.md
@@ -2,7 +2,6 @@
 id: admin-api-clusters
 title: Managing Clusters
 sidebar_label: "Clusters"
-original_id: admin-api-clusters
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -24,9 +23,9 @@ servers (aka [bookies](reference-terminology.md#bookie)), and a [ZooKeeper](http
 
 Clusters can be managed via:
 
-* The [`clusters`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `clusters` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/clusters` endpoint of the admin {@inject: rest:REST:/} API
-* The `clusters` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `clusters` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Clusters resources
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/admin-api-namespaces.md b/site2/website-next/versioned_docs/version-2.4.1/admin-api-namespaces.md
index 216cb6f..c53fa3c 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/admin-api-namespaces.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/admin-api-namespaces.md
@@ -2,7 +2,6 @@
 id: admin-api-namespaces
 title: Managing Namespaces
 sidebar_label: "Namespaces"
-original_id: admin-api-namespaces
 ---
 
 import Tabs from '@theme/Tabs';
@@ -23,9 +22,9 @@ Pulsar [namespaces](reference-terminology.md#namespace) are logical groupings of
 
 Namespaces can be managed via:
 
-* The [`namespaces`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `namespaces` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/namespaces` endpoint of the admin {@inject: rest:REST:/} API
-* The `namespaces` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `namespaces` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Namespaces resources
 
@@ -49,8 +48,12 @@ $ pulsar-admin namespaces create test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|PUT|/admin/v2/namespaces/:tenant/:namespace|operation/createNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -105,8 +108,12 @@ $ pulsar-admin namespaces policies test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace|operation/getPolicies?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -142,8 +149,12 @@ test-tenant/ns2
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant|operation/getTenantNamespaces?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -177,8 +188,12 @@ $ pulsar-admin namespaces delete test-tenant/ns1
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace|operation/deleteNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -196,7 +211,7 @@ admin.namespaces().deleteNamespace(namespace);
 
 #### Set replication cluster
 
-It sets replication clusters for a namespace, so Pulsar can internally replicate publish message from one colo to another colo.
+You can set replication clusters for a namespace to enable Pulsar to internally replicate the published messages from one colocation facility to another.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -234,7 +249,7 @@ admin.namespaces().setNamespaceReplicationClusters(namespace, clusters);
 
 #### Get replication cluster
 
-It gives a list of replication clusters for a given namespace.
+You can get the list of replication clusters for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -281,13 +296,13 @@ admin.namespaces().getNamespaceReplicationClusters(namespace)
 
 Backlog quota helps the broker to restrict bandwidth/storage of a namespace once it reaches a certain threshold limit. Admin can set the limit and take corresponding action after the limit is reached.
 
-  1.  producer_request_hold: broker will hold and not persist produce request payload
+  1.  producer_request_hold: broker holds and does not persist produce request payload
 
-  2.  producer_exception: broker disconnects with the client by giving an exception.
+  2.  producer_exception: broker disconnects with the client by giving an exception
 
-  3.  consumer_backlog_eviction: broker will start discarding backlog messages
+  3.  consumer_backlog_eviction: broker starts discarding backlog messages
 
-  Backlog quota restriction can be taken care by defining restriction of backlog-quota-type: destination_storage
+Backlog quota restriction can be taken care of by defining restriction of backlog-quota-type: destination_storage.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -300,12 +315,6 @@ $ pulsar-admin namespaces set-backlog-quota --limit 10G --limitTime 36000 --poli
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -330,7 +339,7 @@ admin.namespaces().setBacklogQuota(namespace, new BacklogQuota(limit, limitTime,
 
 #### Get backlog quota policies
 
-It shows a configured backlog quota for a given namespace.
+You can get a configured backlog quota for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -378,7 +387,7 @@ admin.namespaces().getBacklogQuotaMap(namespace);
 
 #### Remove backlog quota policies
 
-It removes backlog quota policies for a given namespace
+You can remove backlog quota policies for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -391,12 +400,6 @@ $ pulsar-admin namespaces remove-backlog-quota test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -423,7 +426,7 @@ admin.namespaces().removeBacklogQuota(namespace, backlogQuotaType)
 
 #### Set persistence policies
 
-Persistence policies allow to configure persistency-level for all topic messages under a given namespace.
+Persistence policies allow users to configure persistency-level for all topic messages under a given namespace.
 
   -   Bookkeeper-ack-quorum: Number of acks (guaranteed copies) to wait for each entry, default: 0
 
@@ -444,12 +447,6 @@ $ pulsar-admin namespaces set-persistence --bookkeeper-ack-quorum 2 --bookkeeper
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -474,7 +471,7 @@ admin.namespaces().setPersistence(namespace,new PersistencePolicies(bookkeeperEn
 
 #### Get persistence policies
 
-It shows the configured persistence policies of a given namespace.
+You can get the configured persistence policies of a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -537,12 +534,6 @@ $ pulsar-admin namespaces unload --bundle 0x00000000_0xffffffff test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -567,8 +558,7 @@ admin.namespaces().unloadNamespaceBundle(namespace, bundle)
 
 #### Split namespace bundles
 
-Each namespace bundle can contain multiple topics and each bundle can be served by only one broker. 
-If a single bundle is creating an excessive load on a broker, an admin splits the bundle using this command permitting one or more of the new bundles to be unloaded thus spreading the load across the brokers.
+One namespace bundle can contain multiple topics but can be served by only one broker. If a single bundle is creating an excessive load on a broker, an admin can split the bundle using the command below, permitting one or more of the new bundles to be unloaded, thus balancing the load across the brokers.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -581,12 +571,6 @@ $ pulsar-admin namespaces split-bundle --bundle 0x00000000_0xffffffff test-tenan
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -613,7 +597,7 @@ admin.namespaces().splitNamespaceBundle(namespace, bundle)
 
 #### Set message-ttl
 
-It configures message’s time to live (in seconds) duration.
+You can configure the time to live (in seconds) duration for messages. In the example below, the message-ttl is set as 100s.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -626,12 +610,6 @@ $ pulsar-admin namespaces set-message-ttl --messageTTL 100 test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -656,7 +634,7 @@ admin.namespaces().setNamespaceMessageTTL(namespace, messageTTL)
 
 #### Get message-ttl
 
-It gives a message ttl of configured namespace.
+When the message-ttl for a namespace is set, you can use the command below to get the configured value. This example continues the example of the command `set message-ttl`, so the returned value is 100(s).
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -684,6 +662,12 @@ $ pulsar-admin namespaces get-message-ttl test-tenant/ns1
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -693,6 +677,12 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 
 </Tabs>
@@ -712,12 +702,6 @@ $ pulsar-admin namespaces remove-message-ttl test-tenant/ns1
 
 ```
 
-```
-
-100
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -758,12 +742,6 @@ $ pulsar-admin namespaces clear-backlog --sub my-subscription test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -801,12 +779,6 @@ $ pulsar-admin namespaces clear-backlog  --bundle 0x00000000_0xffffffff  --sub m
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -842,13 +814,7 @@ Each namespace contains multiple topics and the retention size (storage size) of
 
 ```
 
-$ pulsar-admin set-retention --size 100 --time 10 test-tenant/ns1
-
-```
-
-```
-
-N/A
+$ pulsar-admin namespaces set-retention --size 100 --time 10 test-tenant/ns1
 
 ```
 
@@ -932,9 +898,7 @@ disables the throttling.
 :::note
 
 - If neither `clusterDispatchRate` nor `topicDispatchRate` is configured, dispatch throttling is disabled.
->
 - If `topicDispatchRate` is not configured, `clusterDispatchRate` takes effect.
-> 
 - If `topicDispatchRate` is configured, `topicDispatchRate` takes effect.
 
 :::
diff --git a/site2/website-next/versioned_docs/version-2.4.1/admin-api-permissions.md b/site2/website-next/versioned_docs/version-2.4.1/admin-api-permissions.md
index e2ca469..faedbf1 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/admin-api-permissions.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/admin-api-permissions.md
@@ -2,7 +2,6 @@
 id: admin-api-permissions
 title: Managing permissions
 sidebar_label: "Permissions"
-original_id: admin-api-permissions
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/admin-api-persistent-topics.md b/site2/website-next/versioned_docs/version-2.4.1/admin-api-persistent-topics.md
index b6d293b..8a7abae 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/admin-api-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/admin-api-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-persistent-topics
 title: Managing persistent topics
 sidebar_label: "Persistent topics"
-original_id: admin-api-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.1/admin-api-schemas.md b/site2/website-next/versioned_docs/version-2.4.1/admin-api-schemas.md
index 9ffe21f..8399a03 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/admin-api-schemas.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/admin-api-schemas.md
@@ -2,6 +2,5 @@
 id: admin-api-schemas
 title: Managing Schemas
 sidebar_label: "Schemas"
-original_id: admin-api-schemas
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/admin-api-tenants.md b/site2/website-next/versioned_docs/version-2.4.1/admin-api-tenants.md
index fe68336..570ac31 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/admin-api-tenants.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/admin-api-tenants.md
@@ -2,7 +2,6 @@
 id: admin-api-tenants
 title: Managing Tenants
 sidebar_label: "Tenants"
-original_id: admin-api-tenants
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -80,22 +79,26 @@ $ pulsar-admin tenants create my-tenant
 
 ```
 
-When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
+When creating a tenant, you can optionally assign admin roles using the `-r`/`--admin-roles`
+flag, and clusters using the `-c`/`--allowed-clusters` flag. You can specify multiple values
+as a comma-separated list. Here are some examples:
 
 ```shell
 
 $ pulsar-admin tenants create my-tenant \
-  --admin-roles role1,role2,role3
+  --admin-roles role1,role2,role3 \
+  --allowed-clusters cluster1
 
 $ pulsar-admin tenants create my-tenant \
   -r role1
+  -c cluster1
 
 ```
 
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
+{@inject: endpoint|PUT|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -140,7 +143,7 @@ $ pulsar-admin tenants get my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|GET|/admin/v2/tenants/:cluster|operation/getTenant?version=@pulsar:version_number@}
+{@inject: endpoint|GET|/admin/v2/tenants/:tenant|operation/getTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -175,7 +178,7 @@ $ pulsar-admin tenants delete my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/deleteTenant?version=@pulsar:version_number@}
+{@inject: endpoint|DELETE|/admin/v2/tenants/:tenant|operation/deleteTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -210,7 +213,7 @@ $ pulsar-admin tenants update my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/updateTenant?version=@pulsar:version_number@}
+{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/updateTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
diff --git a/site2/website-next/versioned_docs/version-2.4.1/administration-load-balance.md b/site2/website-next/versioned_docs/version-2.4.1/administration-load-balance.md
index 3efba60..834b156 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/administration-load-balance.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/administration-load-balance.md
@@ -2,13 +2,11 @@
 id: administration-load-balance
 title: Pulsar load balance
 sidebar_label: "Load balance"
-original_id: administration-load-balance
 ---
 
 ## Load balance across Pulsar brokers
 
-Pulsar is an horizontally scalable messaging system, so the traffic
-in a logical cluster must be spread across all the available Pulsar brokers as evenly as possible, which is a core requirement.
+Pulsar is a horizontally scalable messaging system, so the traffic in a logical cluster must be balanced across all the available Pulsar brokers as evenly as possible, which is a core requirement.
 
 You can use multiple settings and tools to control the traffic distribution which require a bit of context to understand how the traffic is managed in Pulsar. Though, in most cases, the core requirement mentioned above is true out of the box and you should not worry about it. 
 
@@ -36,11 +34,9 @@ Instead of individual topic or partition assignment, each broker takes ownership
 
 The namespace is the "administrative" unit: many config knobs or operations are done at the namespace level.
 
-For assignment, a namespaces is sharded into a list of "bundles", with each bundle comprising
-a portion of overall hash range of the namespace.
+For assignment, a namespace is sharded into a list of "bundles", with each bundle comprising a portion of the overall hash range of the namespace.
 
-Topics are assigned to a particular bundle by taking the hash of the topic name and checking in which
-bundle the hash falls into.
+Topics are assigned to a particular bundle by taking the hash of the topic name and checking in which bundle the hash falls into.
 
 Each bundle is independent of the others and thus is independently assigned to different brokers.
 
@@ -72,8 +68,7 @@ On the same note, it is beneficial to start with more bundles than the number of
 
 ### Unload topics and bundles
 
-You can "unload" a topic in Pulsar with admin operation. Unloading means to close the topics,
-release ownership and reassign the topics to a new broker, based on current load.
+You can "unload" a topic in Pulsar with admin operation. Unloading means to close the topics, release ownership and reassign the topics to a new broker, based on current load.
 
 When unloading happens, the client experiences a small latency blip, typically in the order of tens of milliseconds, while the topic is reassigned.
 
@@ -97,9 +92,11 @@ pulsar-admin namespaces unload tenant/namespace
 
 ### Split namespace bundles 
 
-Since the load for the topics in a bundle might change over time, or predicting upfront might just be hard, brokers can split bundles into two. The new smaller bundles can be reassigned to different brokers.
+Since the load for the topics in a bundle might change over time and predicting the load might be hard, bundle split is designed to deal with these issues. The broker splits a bundle into two and the new smaller bundles can be reassigned to different brokers.
 
-The splitting happens based on some tunable thresholds. Any existing bundle that exceeds any of the threshold is a candidate to be split. By default the newly split bundles are also immediately offloaded to other brokers, to facilitate the traffic distribution.
+The splitting is based on some tunable thresholds. Any existing bundle that exceeds any of the thresholds is a candidate to be split. By default, the newly split bundles are also immediately offloaded to other brokers, to facilitate the traffic distribution. 
+
+You can split namespace bundles in two ways, by setting `supportedNamespaceBundleSplitAlgorithms` to `range_equally_divide` or `topic_count_equally_divide` in `broker.conf` file. The former splits the bundle into two parts with the same hash range size; the latter splits the bundle into two parts with the same number of topics. You can also configure other parameters for namespace bundles.
 
 ```properties
 
@@ -130,13 +127,11 @@ loadBalancerNamespaceMaximumBundles=128
 
 The support for automatic load shedding is available in the load manager of Pulsar. This means that whenever the system recognizes a particular broker is overloaded, the system forces some traffic to be reassigned to less loaded brokers.
 
-When a broker is identified as overloaded, the broker forces to "unload" a subset of the bundles, the
-ones with higher traffic, that make up for the overload percentage.
+When a broker is identified as overloaded, the broker forces to "unload" a subset of the bundles, the ones with higher traffic, that make up for the overload percentage.
 
 For example, the default threshold is 85% and if a broker is over quota at 95% CPU usage, then the broker unloads the percent difference plus a 5% margin: `(95% - 85%) + 5% = 15%`.
 
-Given the selection of bundles to offload is based on traffic (as a proxy measure for cpu, network
-and memory), broker unloads bundles for at least 15% of traffic.
+Given the selection of bundles to offload is based on traffic (as a proxy measure for cpu, network and memory), broker unloads bundles for at least 15% of traffic.
 
 The automatic load shedding is enabled by default and you can disable the automatic load shedding with this setting:
 
@@ -160,6 +155,20 @@ loadBalancerSheddingGracePeriodMinutes=30
 
 ```
 
+Pulsar supports three types of shedding strategies:
+
+##### ThresholdShedder
+This strategy tends to shed the bundles if any broker's usage is above the configured threshold. It does this by first computing the average resource usage per broker for the whole cluster. The resource usage for each broker is calculated using the method `LocalBrokerData#getMaxResourceUsageWithWeight`. The weights for each resource are configurable. Historical observations are included in the running average based on the broker's setting for loadBalancerHistoryResourcePercenta [...]
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.ThresholdShedder`
+
+##### OverloadShedder
+This strategy will attempt to shed exactly one bundle on brokers which are overloaded, that is, whose maximum system resource usage exceeds loadBalancerBrokerOverloadedThresholdPercentage. To see which resources are considered when determining the maximum system resource. A bundle is recommended for unloading off that broker if and only if the following conditions hold: The broker has at least two bundles assigned and the broker has at least one bundle that has not been unloaded recently [...]
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.OverloadShedder`
+
+##### UniformLoadShedder
+This strategy tends to distribute load uniformly across all brokers. This strategy checks the load difference between the broker with the highest load and the broker with the lowest load. If the difference is higher than the configured thresholds `loadBalancerMsgRateDifferenceShedderThreshold` and `loadBalancerMsgThroughputMultiplierDifferenceShedderThreshold`, it finds bundles that can be unloaded to distribute traffic evenly across all brokers. Configure the broker with the below value to use this strategy.
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.UniformLoadShedder`
+
 #### Broker overload thresholds
 
 The determinations of when a broker is overloaded is based on threshold of CPU, network and memory usage. Whenever either of those metrics reaches the threshold, the system triggers the shedding (if enabled).
@@ -175,9 +184,7 @@ loadBalancerBrokerOverloadedThresholdPercentage=85
 
 Pulsar gathers the usage stats from the system metrics.
 
-In case of network utilization, in some cases the network interface speed that Linux reports is
-not correct and needs to be manually overridden. This is the case in AWS EC2 instances with 1Gbps
-NIC speed for which the OS reports 10Gbps speed.
+In case of network utilization, in some cases the network interface speed that Linux reports is not correct and needs to be manually overridden. This is the case in AWS EC2 instances with 1Gbps NIC speed for which the OS reports 10Gbps speed.
 
 Because of the incorrect max speed, the Pulsar load manager might think the broker has not reached the NIC capacity, while in fact the broker already uses all the bandwidth and the traffic is slowed down.
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/administration-proxy.md b/site2/website-next/versioned_docs/version-2.4.1/administration-proxy.md
index c046ed3..3cef937 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/administration-proxy.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/administration-proxy.md
@@ -2,10 +2,9 @@
 id: administration-proxy
 title: Pulsar proxy
 sidebar_label: "Pulsar proxy"
-original_id: administration-proxy
 ---
 
-Pulsar proxy is an optional gateway. Pulsar proxy is used when direction connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
+Pulsar proxy is an optional gateway. Pulsar proxy is used when direct connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
 
 ## Configure the proxy
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/administration-stats.md b/site2/website-next/versioned_docs/version-2.4.1/administration-stats.md
index ac0c036..2ccd73c 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/administration-stats.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/administration-stats.md
@@ -2,7 +2,6 @@
 id: administration-stats
 title: Pulsar stats
 sidebar_label: "Pulsar statistics"
-original_id: administration-stats
 ---
 
 ## Partitioned topics
diff --git a/site2/website-next/versioned_docs/version-2.4.1/administration-zk-bk.md b/site2/website-next/versioned_docs/version-2.4.1/administration-zk-bk.md
index de10d50..e5f9688 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/administration-zk-bk.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/administration-zk-bk.md
@@ -2,7 +2,6 @@
 id: administration-zk-bk
 title: ZooKeeper and BookKeeper administration
 sidebar_label: "ZooKeeper and BookKeeper"
-original_id: administration-zk-bk
 ---
 
 Pulsar relies on two external systems for essential tasks:
diff --git a/site2/website-next/versioned_docs/version-2.4.1/client-libraries-cpp.md b/site2/website-next/versioned_docs/version-2.4.1/client-libraries-cpp.md
index 333ec67..958861a 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/client-libraries-cpp.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/client-libraries-cpp.md
@@ -2,7 +2,6 @@
 id: client-libraries-cpp
 title: Pulsar C++ client
 sidebar_label: "C++"
-original_id: client-libraries-cpp
 ---
 
 You can use Pulsar C++ client to create Pulsar producers and consumers in C++.
@@ -11,7 +10,7 @@ All the methods in producer, consumer, and reader of a C++ client are thread-saf
 
 ## Supported platforms
 
-Pulsar C++ client is supported on **Linux** and **MacOS** platforms.
+Pulsar C++ client is supported on **Linux**, **MacOS**, and **Windows** platforms.
 
 [Doxygen](http://www.doxygen.nl/)-generated API docs for the C++ client are available [here](/api/cpp).
 
@@ -21,8 +20,8 @@ You need to install the following components before using the C++ client:
 
 * [CMake](https://cmake.org/)
 * [Boost](http://www.boost.org/)
-* [Protocol Buffers](https://developers.google.com/protocol-buffers/) 2.6
-* [libcurl](https://curl.haxx.se/libcurl/)
+* [Protocol Buffers](https://developers.google.com/protocol-buffers/) >= 3
+* [libcurl](https://curl.se/libcurl/)
 * [Google Test](https://github.com/google/googletest)
 
 ## Linux
@@ -147,6 +146,12 @@ $ rpm -ivh apache-pulsar-client*.rpm
 
 After you install RPM successfully, Pulsar libraries are in the `/usr/lib` directory.
 
+:::note
+
+If you get the error that `libpulsar.so: cannot open shared object file: No such file or directory` when starting Pulsar client, you may need to run `ldconfig` first.
+
+:::
+
 ### Install Debian
 
 1. Download a Debian package from the links in the table. 
@@ -236,10 +241,8 @@ $ export OPENSSL_INCLUDE_DIR=/usr/local/opt/openssl/include/
 $ export OPENSSL_ROOT_DIR=/usr/local/opt/openssl/
 
 # Protocol Buffers installation
-$ brew tap homebrew/versions
-$ brew install protobuf260
-$ brew install boost
-$ brew install log4cxx
+$ brew install protobuf boost boost-python log4cxx
+# If you are using python3, you need to install boost-python3 
 
 # Google Test installation
 $ git clone https://github.com/google/googletest.git
@@ -269,6 +272,50 @@ brew install libpulsar
 
 ```
 
+## Windows (64-bit)
+
+### Compilation
+
+1. Clone the Pulsar repository.
+
+```shell
+
+$ git clone https://github.com/apache/pulsar
+
+```
+
+2. Install all necessary dependencies.
+
+```shell
+
+cd ${PULSAR_HOME}/pulsar-client-cpp
+vcpkg install --feature-flags=manifests --triplet x64-windows
+
+```
+
+3. Build C++ libraries.
+
+```shell
+
+cmake -B ./build -A x64 -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=OFF -DVCPKG_TRIPLET=x64-windows -DCMAKE_BUILD_TYPE=Release -S .
+cmake --build ./build --config Release
+
+```
+
+> **NOTE**
+>
+> 1. For Windows 32-bit, you need to use `-A Win32` and `-DVCPKG_TRIPLET=x86-windows`.
+> 2. For MSVC Debug mode, you need to replace `Release` with `Debug` for both `CMAKE_BUILD_TYPE` variable and `--config` option.
+
+4. Client libraries are available in the following places.
+
+```
+
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.lib
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.dll
+
+```
+
 ## Connection URLs
 
 To connect Pulsar using client libraries, you need to specify a Pulsar protocol URL.
@@ -299,109 +346,361 @@ pulsar+ssl://pulsar.us-west.example.com:6651
 
 ## Create a consumer
 
-To use Pulsar as a consumer, you need to create a consumer on the C++ client. The following is an example. 
+To use Pulsar as a consumer, you need to create a consumer on the C++ client. There are two main ways of using the consumer:
+- [Blocking style](#blocking-example): synchronously calling `receive(msg)`.
+- [Non-blocking](#consumer-with-a-message-listener) (event based) style: using a message listener.
+
+### Blocking example
+
+The benefit of this approach is that it is the simplest code: simply keep calling `receive(msg)`, which blocks until a message is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
 
-Consumer consumer;
-Result result = client.subscribe("my-topic", "my-subscription-name", consumer);
-if (result != ResultOk) {
-    LOG_ERROR("Failed to subscribe: " << result);
-    return -1;
+    Message msg;
+    int ctr = 0;
+    // consume 100 messages
+    while (ctr < 100) {
+        consumer.receive(msg);
+        std::cout << "Received: " << msg
+            << "  with payload '" << msg.getDataAsString() << "'" << std::endl;
+
+        consumer.acknowledge(msg);
+        ctr++;
+    }
+
+    std::cout << "Finished consuming synchronously!" << std::endl;
+
+    client.close();
+    return 0;
 }
 
-Message msg;
+```
+
+### Consumer with a message listener
+
+You can avoid running a loop with blocking calls by using an event-based style with a message listener, which is invoked for each message that is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
-while (true) {
-    consumer.receive(msg);
-    LOG_INFO("Received: " << msg
-            << "  with payload '" << msg.getDataAsString() << "'");
+```c++
+
+#include <pulsar/Client.h>
+#include <atomic>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> messagesReceived;
+
+void handleAckComplete(Result res) {
+    std::cout << "Ack res: " << res << std::endl;
+}
 
-    consumer.acknowledge(msg);
+void listener(Consumer consumer, const Message& msg) {
+    std::cout << "Got message " << msg << " with content '" << msg.getDataAsString() << "'" << std::endl;
+    messagesReceived++;
+    consumer.acknowledgeAsync(msg.getMessageId(), handleAckComplete);
 }
 
-client.close();
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setMessageListener(listener);
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
+
+    // wait for 100 messages to be consumed
+    while (messagesReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished consuming asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
 ## Create a producer
 
-To use Pulsar as a producer, you need to create a producer on the C++ client. The following is an example. 
+To use Pulsar as a producer, you need to create a producer on the C++ client. There are two main ways of using a producer:
+- [Blocking style](#simple-blocking-example) : each call to `send` waits for an ack from the broker.
+- [Non-blocking asynchronous style](#non-blocking-example) : `sendAsync` is called instead of `send` and a callback is supplied for when the ack is received from the broker.
+
+### Simple blocking example
+
+This example sends 100 messages using the blocking style. While simple, it does not produce high throughput as it waits for each ack to come back before sending the next message.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+#include <thread>
 
-Producer producer;
-Result result = client.createProducer("my-topic", producer);
-if (result != ResultOk) {
-    LOG_ERROR("Error creating producer: " << result);
-    return -1;
-}
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Producer producer;
+    Result result = client.createProducer("persistent://public/default/my-topic", producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages synchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        Result result = producer.send(msg);
+        if (result != ResultOk) {
+            std::cout << "The message " << content << " could not be sent, received code: " << result << std::endl;
+        } else {
+            std::cout << "The message " << content << " sent successfully" << std::endl;
+        }
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    std::cout << "Finished producing synchronously!" << std::endl;
 
-// Publish 10 messages to the topic
-for (int i = 0; i < 10; i++){
-    Message msg = MessageBuilder().setContent("my-message").build();
-    Result res = producer.send(msg);
-    LOG_INFO("Message sent: " << res);
+    client.close();
+    return 0;
 }
-client.close();
 
 ```
 
-## Enable authentication in connection URLs
-If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
+### Non-blocking example
 
-```cpp
+This example sends 100 messages using the non-blocking style calling `sendAsync` instead of `send`. This allows the producer to have multiple messages inflight at a time which increases throughput.
 
-ClientConfiguration config = ClientConfiguration();
-config.setUseTls(true);
-config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
-config.setTlsAllowInsecureConnection(false);
-config.setAuth(pulsar::AuthTls::create(
-            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+The producer configuration `blockIfQueueFull` is useful here to avoid `ResultProducerQueueIsFull` errors when the internal queue for outgoing send requests becomes full. Once the internal queue is full, `sendAsync` becomes blocking which can make your code simpler.
 
-Client client("pulsar+ssl://my-broker.com:6651", config);
+Without this configuration, the result code `ResultProducerQueueIsFull` is passed to the callback. You must decide how to deal with that (retry, discard etc).
+
+```c++
+
+#include <pulsar/Client.h>
+#include <atomic>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> acksReceived;
+
+void callback(Result code, const MessageId& msgId, std::string msgContent) {
+    // message processing logic here
+    std::cout << "Received ack for msg: " << msgContent << " with code: "
+        << code << " -- MsgID: " << msgId << std::endl;
+    acksReceived++;
+}
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    ProducerConfiguration producerConf;
+    producerConf.setBlockIfQueueFull(true);
+    Producer producer;
+    Result result = client.createProducer("persistent://public/default/my-topic",
+                                          producerConf, producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages asynchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        producer.sendAsync(msg, std::bind(callback,
+                                          std::placeholders::_1, std::placeholders::_2, content));
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    // wait for 100 messages to be acked
+    while (acksReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished producing asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
-For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+### Partitioned topics and lazy producers
 
-## Schema
+When scaling out a Pulsar topic, you may configure a topic to have hundreds of partitions. Likewise, you may have also scaled out your producers so there are hundreds or even thousands of producers. This can put some strain on the Pulsar brokers as when you create a producer on a partitioned topic, internally it creates one internal producer per partition which involves communications to the brokers for each one. So for a topic with 1000 partitions and 1000 producers, it ends up creating [...]
 
-This section describes some examples about schema. For more information about schema, see [Pulsar schema](schema-get-started).
+You can reduce the load caused by this combination of a large number of partitions and many producers by doing the following:
+- use SinglePartition partition routing mode (this ensures that all messages are only sent to a single, randomly selected partition)
+- use non-keyed messages (when messages are keyed, routing is based on the hash of the key and so messages will end up being sent to multiple partitions)
+- use lazy producers (this ensures that an internal producer is only created on demand when a message needs to be routed to a partition)
 
-### Create producer with Avro schema
+With our example above, that reduces the number of internal producers spread out over the 1000 producer apps from 1,000,000 to just 1000.
 
-The following example shows how to create a producer with an Avro schema.
+Note that there can be extra latency for the first message sent. If you set a low send timeout, this timeout could be reached if the initial connection handshake is slow to complete.
 
-```cpp
+```c++
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-Producer producer;
 ProducerConfiguration producerConf;
-producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.createProducer("topic-avro", producerConf, producer);
+producerConf.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition);
+producerConf.setLazyStartPartitionedProducers(true);
 
 ```
 
-### Create consumer with Avro schema
-
-The following example shows how to create a consumer with an Avro schema.
+## Enable authentication in connection URLs
+If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
 
 ```cpp
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-ConsumerConfiguration consumerConf;
-Consumer consumer;
-consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+ClientConfiguration config = ClientConfiguration();
+config.setUseTls(true);
+config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
+config.setTlsAllowInsecureConnection(false);
+config.setAuth(pulsar::AuthTls::create(
+            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+
+Client client("pulsar+ssl://my-broker.com:6651", config);
 
 ```
 
+For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+
+## Schema
+
+This section describes some examples about schema. For more information about
+schema, see [Pulsar schema](schema-get-started).
+
+### Avro schema
+
+- The following example shows how to create a producer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  Producer producer;
+  ProducerConfiguration producerConf;
+  producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.createProducer("topic-avro", producerConf, producer);
+  
+  ```
+
+- The following example shows how to create a consumer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  ConsumerConfiguration consumerConf;
+  Consumer consumer;
+  consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+  
+  ```
+
+### ProtobufNative schema
+
+The following example shows how to create a producer and a consumer with a ProtobufNative schema.
+​
+1. Generate the `User` class using Protobuf3. 
+
+   :::note
+
+   You need to use Protobuf3 or later versions.
+
+   :::
+
+​
+
+   ```protobuf
+   
+   syntax = "proto3";
+   
+   message User {
+       string name = 1;
+       int32 age = 2;
+   }
+   
+   ```
+
+​
+2. Include the `ProtobufNativeSchema.h` in your source code. Ensure the Protobuf dependency has been added to your project.
+​
+
+   ```c++
+   
+   #include <pulsar/ProtobufNativeSchema.h>
+   
+   ```
+
+​
+3. Create a producer to send a `User` instance.
+​
+
+   ```c++
+   
+   ProducerConfiguration producerConf;
+   producerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   Producer producer;
+   client.createProducer("topic-protobuf", producerConf, producer);
+   User user;
+   user.set_name("my-name");
+   user.set_age(10);
+   std::string content;
+   user.SerializeToString(&content);
+   producer.send(MessageBuilder().setContent(content).build());
+   
+   ```
+
+​
+4. Create a consumer to receive a `User` instance.
+​
+
+   ```c++
+   
+   ConsumerConfiguration consumerConf;
+   consumerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   consumerConf.setSubscriptionInitialPosition(InitialPositionEarliest);
+   Consumer consumer;
+   client.subscribe("topic-protobuf", "my-sub", consumerConf, consumer);
+   Message msg;
+   consumer.receive(msg);
+   User user2;
+   user2.ParseFromArray(msg.getData(), msg.getLength());
+   
+   ```
+
diff --git a/site2/website-next/versioned_docs/version-2.4.1/client-libraries-go.md b/site2/website-next/versioned_docs/version-2.4.1/client-libraries-go.md
index 7e797bb..22df463 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/client-libraries-go.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/client-libraries-go.md
@@ -2,7 +2,6 @@
 id: client-libraries-go
 title: Pulsar Go client
 sidebar_label: "Go"
-original_id: client-libraries-go
 ---
 
 > Tips: Currently, the CGo client will be deprecated, if you want to know more about the CGo client, please refer to [CGo client docs](client-libraries-cgo)
@@ -286,7 +285,8 @@ defer client.Close()
 
 topicName := newTopicName()
 producer, err := client.CreateProducer(pulsar.ProducerOptions{
-	Topic: topicName,
+    Topic:           topicName,
+    DisableBatching: true,
 })
 if err != nil {
 	log.Fatal(err)
@@ -330,6 +330,85 @@ canc()
 
 ```
 
+#### How to use Prometheus metrics in producer
+
+Pulsar Go client registers client metrics using Prometheus. This section demonstrates how to create a simple Pulsar producer application that exposes Prometheus metrics via HTTP.
+
+1. Write a simple producer application.
+
+```go
+
+// Create a Pulsar client
+client, err := pulsar.NewClient(pulsar.ClientOptions{
+	URL: "pulsar://localhost:6650",
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer client.Close()
+
+// Start a separate goroutine for Prometheus metrics
+// In this case, Prometheus metrics can be accessed via http://localhost:2112/metrics
+go func() {
+    prometheusPort := 2112
+    log.Printf("Starting Prometheus metrics at http://localhost:%v/metrics\n", prometheusPort)
+    http.Handle("/metrics", promhttp.Handler())
+    err = http.ListenAndServe(":"+strconv.Itoa(prometheusPort), nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+}()
+
+// Create a producer
+producer, err := client.CreateProducer(pulsar.ProducerOptions{
+    Topic: "topic-1",
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer producer.Close()
+
+ctx := context.Background()
+
+// Write your business logic here
+// In this case, you build a simple Web server. You can produce messages by requesting http://localhost:8082/produce
+webPort := 8082
+http.HandleFunc("/produce", func(w http.ResponseWriter, r *http.Request) {
+    msgId, err := producer.Send(ctx, &pulsar.ProducerMessage{
+        Payload: []byte(fmt.Sprintf("hello world")),
+    })
+    if err != nil {
+        log.Fatal(err)
+    } else {
+        log.Printf("Published message: %v", msgId)
+        fmt.Fprintf(w, "Published message: %v", msgId)
+    }
+})
+
+err = http.ListenAndServe(":"+strconv.Itoa(webPort), nil)
+if err != nil {
+    log.Fatal(err)
+}
+
+```
+
+2. To scrape metrics from applications, configure a local running Prometheus instance using a configuration file (`prometheus.yml`).
+
+```yaml
+
+scrape_configs:
+- job_name: pulsar-client-go-metrics
+  scrape_interval: 10s
+  static_configs:
+  - targets:
+    - localhost:2112
+
+```
+
+Now you can query Pulsar client metrics on Prometheus.
+
 ### Producer configuration
 
  Name | Description | Default
@@ -607,6 +686,85 @@ defer consumer.Close()
 
 ```
 
+#### How to use Prometheus metrics in consumer
+
+This section demonstrates how to create a simple Pulsar consumer application that exposes Prometheus metrics via HTTP.
+1. Write a simple consumer application.
+
+```go
+
+// Create a Pulsar client
+client, err := pulsar.NewClient(pulsar.ClientOptions{
+    URL: "pulsar://localhost:6650",
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer client.Close()
+
+// Start a separate goroutine for Prometheus metrics
+// In this case, Prometheus metrics can be accessed via http://localhost:2112/metrics
+go func() {
+    prometheusPort := 2112
+    log.Printf("Starting Prometheus metrics at http://localhost:%v/metrics\n", prometheusPort)
+    http.Handle("/metrics", promhttp.Handler())
+    err = http.ListenAndServe(":"+strconv.Itoa(prometheusPort), nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+}()
+
+// Create a consumer
+consumer, err := client.Subscribe(pulsar.ConsumerOptions{
+    Topic:            "topic-1",
+    SubscriptionName: "sub-1",
+    Type:             pulsar.Shared,
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer consumer.Close()
+
+ctx := context.Background()
+
+// Write your business logic here
+// In this case, you build a simple Web server. You can consume messages by requesting http://localhost:8083/consume
+webPort := 8083
+http.HandleFunc("/consume", func(w http.ResponseWriter, r *http.Request) {
+    msg, err := consumer.Receive(ctx)
+    if err != nil {
+        log.Fatal(err)
+    } else {
+        log.Printf("Received message msgId: %v -- content: '%s'\n", msg.ID(), string(msg.Payload()))
+        fmt.Fprintf(w, "Received message msgId: %v -- content: '%s'\n", msg.ID(), string(msg.Payload()))
+        consumer.Ack(msg)
+    }
+})
+
+err = http.ListenAndServe(":"+strconv.Itoa(webPort), nil)
+if err != nil {
+    log.Fatal(err)
+}
+
+```
+
+2. To scrape metrics from applications, configure a local running Prometheus instance using a configuration file (`prometheus.yml`).
+
+```yaml
+
+scrape_configs:
+- job_name: pulsar-client-go-metrics
+  scrape_interval: 10s
+  static_configs:
+  - targets:
+    - localhost:2112
+
+```
+
+Now you can query Pulsar client metrics on Prometheus.
+
 ### Consumer configuration
 
  Name | Description | Default
diff --git a/site2/website-next/versioned_docs/version-2.4.1/client-libraries-websocket.md b/site2/website-next/versioned_docs/version-2.4.1/client-libraries-websocket.md
index bc13b43..c663f97 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/client-libraries-websocket.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/client-libraries-websocket.md
@@ -2,7 +2,6 @@
 id: client-libraries-websocket
 title: Pulsar WebSocket API
 sidebar_label: "WebSocket"
-original_id: client-libraries-websocket
 ---
 
 Pulsar [WebSocket](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API) API provides a simple way to interact with Pulsar using languages that do not have an official [client library](getting-started-clients). Through WebSocket, you can publish and consume messages and use features available on the [Client Features Matrix](https://github.com/apache/pulsar/wiki/Client-Features-Matrix) page.
@@ -190,7 +189,7 @@ Key | Type | Required? | Explanation
 `maxRedeliverCount` | int | no | Define a [maxRedeliverCount](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#deadLetterPolicy-org.apache.pulsar.client.api.DeadLetterPolicy-) for the consumer (default: 0). Activates [Dead Letter Topic](https://github.com/apache/pulsar/wiki/PIP-22%3A-Pulsar-Dead-Letter-Topic) feature.
 `deadLetterTopic` | string | no | Define a [deadLetterTopic](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#deadLetterPolicy-org.apache.pulsar.client.api.DeadLetterPolicy-) for the consumer (default: {topic}-{subscription}-DLQ). Activates [Dead Letter Topic](https://github.com/apache/pulsar/wiki/PIP-22%3A-Pulsar-Dead-Letter-Topic) feature.
 `pullMode` | boolean | no | Enable pull mode (default: false). See "Flow Control" below.
-`negativeAckRedeliveryDelay` | int | no | When a message is negatively acknowledged, it will be redelivered to the DLQ.
+`negativeAckRedeliveryDelay` | int | no | When a message is negatively acknowledged, the delay time before the message is redelivered (in milliseconds). The default value is 60000.
 `token` | string | no | Authentication token, this is used for the browser javascript client
 
 NB: these parameter (except `pullMode`) apply to the internal consumer of the WebSocket service.
@@ -204,23 +203,60 @@ Server will push messages on the WebSocket session:
 ```json
 
 {
-  "messageId": "CAAQAw==",
-  "payload": "SGVsbG8gV29ybGQ=",
-  "properties": {"key1": "value1", "key2": "value2"},
-  "publishTime": "2016-08-30 16:45:57.785",
-  "redeliveryCount": 4
+  "messageId": "CAMQADAA",
+  "payload": "hvXcJvHW7kOSrUn17P2q71RA5SdiXwZBqw==",
+  "properties": {},
+  "publishTime": "2021-10-29T16:01:38.967-07:00",
+  "redeliveryCount": 0,
+  "encryptionContext": {
+    "keys": {
+      "client-rsa.pem": {
+        "keyValue": "jEuwS+PeUzmCo7IfLNxqoj4h7txbLjCQjkwpaw5AWJfZ2xoIdMkOuWDkOsqgFmWwxiecakS6GOZHs94x3sxzKHQx9Oe1jpwBg2e7L4fd26pp+WmAiLm/ArZJo6JotTeFSvKO3u/yQtGTZojDDQxiqFOQ1ZbMdtMZA8DpSMuq+Zx7PqLo43UdW1+krjQfE5WD+y+qE3LJQfwyVDnXxoRtqWLpVsAROlN2LxaMbaftv5HckoejJoB4xpf/dPOUqhnRstwQHf6klKT5iNhjsY4usACt78uILT0pEPd14h8wEBidBz/vAlC/zVMEqiDVzgNS7dqEYS4iHbf7cnWVCn3Hxw==",
+        "metadata": {}
+      }
+    },
+    "param": "Tfu1PxVm6S9D3+Hk",
+    "compressionType": "NONE",
+    "uncompressedMessageSize": 0,
+    "batchSize": {
+      "empty": false,
+      "present": true
+    }
+  }
 }
 
 ```
 
-Key | Type | Required? | Explanation
-:---|:-----|:----------|:-----------
-`messageId` | string | yes | Message ID
-`payload` | string | yes | Base-64 encoded payload
-`publishTime` | string | yes | Publish timestamp
-`redeliveryCount` | number | yes | Number of times this message was already delivered
-`properties` | key-value pairs | no | Application-defined properties
-`key` | string | no |  Original routing key set by producer
+Below are the parameters in the WebSocket consumer response.
+
+- General parameters
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `messageId` | string | yes | Message ID
+  `payload` | string | yes | Base-64 encoded payload
+  `publishTime` | string | yes | Publish timestamp
+  `redeliveryCount` | number | yes | Number of times this message was already delivered
+  `properties` | key-value pairs | no | Application-defined properties
+  `key` | string | no |  Original routing key set by producer
+  `encryptionContext` | EncryptionContext | no | Encryption context that consumers can use to decrypt received messages
+  `param` | string | no | Initialization vector for cipher (Base64 encoding)
+  `batchSize` | string | no | Number of entries in a message (if it is a batch message)
+  `uncompressedMessageSize` | string | no | Message size before compression
+  `compressionType` | string | no | Algorithm used to compress the message payload
+
+- `encryptionContext` related parameter
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `keys` |key-EncryptionKey pairs | yes | Key in `key-EncryptionKey` pairs is an encryption key name. Value in `key-EncryptionKey` pairs is an encryption key object.
+
+- `encryptionKey` related parameters
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `keyValue` | string | yes | Encryption key (Base64 encoding)
+  `metadata` | key-value pairs | no | Application-defined metadata
 
 #### Acknowledging the message
 
@@ -454,9 +490,15 @@ TOPIC = scheme + '://localhost:8080/ws/v2/producer/persistent/public/default/my-
 
 ws = websocket.create_connection(TOPIC)
 
+# encode message
+s = "Hello World"
+firstEncoded = s.encode("UTF-8")
+binaryEncoded = base64.b64encode(firstEncoded)
+payloadString = binaryEncoded.decode('UTF-8')
+
 # Send one message as JSON
 ws.send(json.dumps({
-    'payload' : base64.b64encode('Hello World'),
+    'payload' : payloadString,
     'properties': {
         'key1' : 'value1',
         'key2' : 'value2'
@@ -466,9 +508,9 @@ ws.send(json.dumps({
 
 response =  json.loads(ws.recv())
 if response['result'] == 'ok':
-    print 'Message published successfully'
+    print( 'Message published successfully')
 else:
-    print 'Failed to publish message:', response
+    print('Failed to publish message:', response)
 ws.close()
 
 ```
@@ -495,7 +537,7 @@ while True:
     msg = json.loads(ws.recv())
     if not msg: break
 
-    print "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload']))
+    print( "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload'])))
 
     # Acknowledge successful processing
     ws.send(json.dumps({'messageId' : msg['messageId']}))
@@ -525,7 +567,7 @@ while True:
     msg = json.loads(ws.recv())
     if not msg: break
 
-    print "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload']))
+    print ( "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload'])))
 
     # Acknowledge successful processing
     ws.send(json.dumps({'messageId' : msg['messageId']}))
diff --git a/site2/website-next/versioned_docs/version-2.4.1/client-libraries.md b/site2/website-next/versioned_docs/version-2.4.1/client-libraries.md
index 23e5a06..ab5b7c4 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/client-libraries.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/client-libraries.md
@@ -2,7 +2,6 @@
 id: client-libraries
 title: Pulsar client libraries
 sidebar_label: "Overview"
-original_id: client-libraries
 ---
 
 Pulsar supports the following client libraries:
@@ -16,7 +15,7 @@ Pulsar supports the following client libraries:
 - [C# client](client-libraries-dotnet)
 
 ## Feature matrix
-Pulsar client feature matrix for different languages is listed on [Client Features Matrix](https://github.com/apache/pulsar/wiki/Client-Features-Matrix) page.
+Pulsar client feature matrix for different languages is listed on [Pulsar Feature Matrix (Client and Function)](https://github.com/apache/pulsar/wiki/PIP-108%3A-Pulsar-Feature-Matrix-%28Client-and-Function%29) page.
 
 ## Third-party clients
 
@@ -33,3 +32,4 @@ Besides the official released clients, multiple projects on developing Pulsar cl
 | Scala | [pulsar4s](https://github.com/sksamuel/pulsar4s) | [sksamuel](https://github.com/sksamuel) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Idomatic, typesafe, and reactive Scala client for Apache Pulsar |
 | Rust | [pulsar-rs](https://github.com/wyyerd/pulsar-rs) | [Wyyerd Group](https://github.com/wyyerd) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Future-based Rust bindings for Apache Pulsar |
 | .NET | [pulsar-client-dotnet](https://github.com/fsharplang-ru/pulsar-client-dotnet) | [Lanayx](https://github.com/Lanayx) | [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native .NET client for C#/F#/VB |
+| Node.js | [pulsar-flex](https://github.com/ayeo-flex-org/pulsar-flex) | [Daniel Sinai](https://github.com/danielsinai), [Ron Farkash](https://github.com/ronfarkash), [Gal Rosenberg](https://github.com/galrose)| [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native Nodejs client |
diff --git a/site2/website-next/versioned_docs/version-2.4.1/concepts-architecture-overview.md b/site2/website-next/versioned_docs/version-2.4.1/concepts-architecture-overview.md
index 6a501d2..8fe0717 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/concepts-architecture-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/concepts-architecture-overview.md
@@ -2,7 +2,6 @@
 id: concepts-architecture-overview
 title: Architecture Overview
 sidebar_label: "Architecture"
-original_id: concepts-architecture-overview
 ---
 
 At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication) data amongst themselves.
@@ -146,7 +145,7 @@ Some important things to know about the Pulsar proxy:
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL.
 
 You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/concepts-authentication.md b/site2/website-next/versioned_docs/version-2.4.1/concepts-authentication.md
index b375ecb..335da8d 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/concepts-authentication.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/concepts-authentication.md
@@ -2,7 +2,6 @@
 id: concepts-authentication
 title: Authentication and Authorization
 sidebar_label: "Authentication and Authorization"
-original_id: concepts-authentication
 ---
 
 Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/concepts-clients.md b/site2/website-next/versioned_docs/version-2.4.1/concepts-clients.md
index b68f76a..65201b5 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/concepts-clients.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/concepts-clients.md
@@ -2,7 +2,6 @@
 id: concepts-clients
 title: Pulsar Clients
 sidebar_label: "Clients"
-original_id: concepts-clients
 ---
 
 Pulsar exposes a client API with language bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md), [C++](client-libraries-cpp.md) and [C#](client-libraries-dotnet). The client API optimizes and encapsulates Pulsar's client-broker communication protocol and exposes a simple and intuitive API for use by applications.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/concepts-multi-tenancy.md b/site2/website-next/versioned_docs/version-2.4.1/concepts-multi-tenancy.md
index be752cc..8a17e72 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/concepts-multi-tenancy.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/concepts-multi-tenancy.md
@@ -2,7 +2,6 @@
 id: concepts-multi-tenancy
 title: Multi Tenancy
 sidebar_label: "Multi Tenancy"
-original_id: concepts-multi-tenancy
 ---
 
 Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/concepts-overview.md b/site2/website-next/versioned_docs/version-2.4.1/concepts-overview.md
index b903fa4..c76032c 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/concepts-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/concepts-overview.md
@@ -2,7 +2,6 @@
 id: concepts-overview
 title: Pulsar Overview
 sidebar_label: "Overview"
-original_id: concepts-overview
 ---
 
 Pulsar is a multi-tenant, high-performance solution for server-to-server messaging. Pulsar was originally developed by Yahoo, it is under the stewardship of the [Apache Software Foundation](https://www.apache.org/).
diff --git a/site2/website-next/versioned_docs/version-2.4.1/concepts-replication.md b/site2/website-next/versioned_docs/version-2.4.1/concepts-replication.md
index 6e23962..11677cc 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/concepts-replication.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/concepts-replication.md
@@ -2,7 +2,6 @@
 id: concepts-replication
 title: Geo Replication
 sidebar_label: "Geo Replication"
-original_id: concepts-replication
 ---
 
 Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo) in Pulsar enables you to do that.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/concepts-topic-compaction.md b/site2/website-next/versioned_docs/version-2.4.1/concepts-topic-compaction.md
index c85e703..3356298 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/concepts-topic-compaction.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/concepts-topic-compaction.md
@@ -2,7 +2,6 @@
 id: concepts-topic-compaction
 title: Topic Compaction
 sidebar_label: "Topic Compaction"
-original_id: concepts-topic-compaction
 ---
 
 Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-bookkeepermetadata.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-bookkeepermetadata.md
index b0fa98d..187cb65 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-bookkeepermetadata.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-bookkeepermetadata.md
@@ -1,7 +1,6 @@
 ---
 id: cookbooks-bookkeepermetadata
 title: BookKeeper Ledger Metadata
-original_id: cookbooks-bookkeepermetadata
 ---
 
 Pulsar stores data on BookKeeper ledgers, you can understand the contents of a ledger by inspecting the metadata attached to the ledger.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-deduplication.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-deduplication.md
index 1669afa..307fe03 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-deduplication.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-deduplication.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-deduplication
 title: Message deduplication
-sidebar_label: "Message deduplication"
-original_id: cookbooks-deduplication
+sidebar_label: "Message deduplication "
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-encryption.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-encryption.md
index f0d8fb8..fbd1c97 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-encryption.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-encryption
 title: Pulsar Encryption
-sidebar_label: "Encryption"
-original_id: cookbooks-encryption
+sidebar_label: "Encryption "
 ---
 
 Pulsar encryption allows applications to encrypt messages at the producer and decrypt at the consumer. Encryption is performed using the public/private key pair configured by the application. Encrypted messages can only be decrypted by consumers with a valid key.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-message-queue.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-message-queue.md
index eb43cbd..9b93a94 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-message-queue.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-message-queue.md
@@ -2,7 +2,6 @@
 id: cookbooks-message-queue
 title: Using Pulsar as a message queue
 sidebar_label: "Message queue"
-original_id: cookbooks-message-queue
 ---
 
 Message queues are essential components of many large-scale data architectures. If every single work object that passes through your system absolutely *must* be processed in spite of the slowness or downright failure of this or that system component, there's a good chance that you'll need a message queue to step in and ensure that unprocessed data is retained---with correct ordering---until the required actions are taken.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-non-persistent.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-non-persistent.md
index 391569a..d40c4fb 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-non-persistent.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-non-persistent.md
@@ -2,7 +2,6 @@
 id: cookbooks-non-persistent
 title: Non-persistent messaging
 sidebar_label: "Non-persistent messaging"
-original_id: cookbooks-non-persistent
 ---
 
 **Non-persistent topics** are Pulsar topics in which message data is *never* [persistently stored](concepts-architecture-overview.md#persistent-storage) and kept only in memory. This cookbook provides:
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-partitioned.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-partitioned.md
index 7882fb9..2589693 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-partitioned.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-partitioned.md
@@ -2,6 +2,5 @@
 id: cookbooks-partitioned
 title: Partitioned topics
 sidebar_label: "Partitioned Topics"
-original_id: cookbooks-partitioned
 ---
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-retention-expiry.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-retention-expiry.md
index b9353b5..738cf42 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-retention-expiry.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-retention-expiry.md
@@ -2,7 +2,6 @@
 id: cookbooks-retention-expiry
 title: Message retention and expiry
 sidebar_label: "Message retention and expiry"
-original_id: cookbooks-retention-expiry
 ---
 
 import Tabs from '@theme/Tabs';
@@ -36,7 +35,7 @@ By default, when a Pulsar message arrives at a broker, the message is stored unt
 
 Retention policies are useful when you use the Reader interface. The Reader interface does not use acknowledgements, and messages do not exist within backlogs. It is required to configure retention for Reader-only use cases.
 
-When you set a retention policy on topics in a namespace, you must set **both** a *size limit* and a *time limit*. You can refer to the following table to set retention policies in `pulsar-admin` and Java.
+When you set a retention policy on topics in a namespace, you must set **both** a *size limit* (via `defaultRetentionSizeInMB`) and a *time limit* (via `defaultRetentionTimeInMinutes`). You can refer to the following table to set retention policies in `pulsar-admin` and Java.
 
 |Time limit|Size limit| Message retention      |
 |----------|----------|------------------------|
@@ -152,7 +151,10 @@ admin.namespaces().setRetention(namespace, policies);
 
 You can fetch the retention policy for a namespace by specifying the namespace. The output will be a JSON object with two keys: `retentionTimeInMinutes` and `retentionSizeInMB`.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-retention`](reference-pulsar-admin.md#namespaces) subcommand and specify the namespace.
 
@@ -168,11 +170,13 @@ $ pulsar-admin namespaces get-retention my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/retention|operation/getRetention?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -180,15 +184,17 @@ admin.namespaces().getRetention(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Backlog quotas
 
 *Backlogs* are sets of unacknowledged messages for a topic that have been stored by bookies. Pulsar stores all unacknowledged messages in backlogs until they are processed and acknowledged.
 
-You can control the allowable size of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
+You can control the allowable size and/or time of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
 
-TODO: Expand on is this per backlog or per topic?
-
-* an allowable *size threshold* for each topic in the namespace
+* an allowable *size and/or time threshold* for each topic in the namespace
 * a *retention policy* that determines which action the [broker](reference-terminology.md#broker) takes if the threshold is exceeded.
 
 The following retention policies are available:
@@ -210,9 +216,12 @@ Backlog quotas are handled at the namespace level. They can be managed via:
 
 You can set a size and/or time threshold and backlog retention policy for all of the topics in a [namespace](reference-terminology.md#namespace) by specifying the namespace, a size limit and/or a time limit in second, and a policy by name.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, and a retention policy using the `-p`/`--policy` flag.
+Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, a time limit using the `-lt`/`--limitTime` flag, a retention policy using the `-p`/`--policy` flag, and a policy type using the `-t`/`--type` flag (the default is `destination_storage`).
 
 ##### Example
 
@@ -220,16 +229,26 @@ Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand a
 
 $ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns \
   --limit 2G \
-  --limitTime 36000 \
   --policy producer_request_hold
 
 ```
 
-#### REST API
+```shell
+
+$ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns/my-topic \
+--limitTime 3600 \
+--policy producer_request_hold \
+--type message_age
+
+```
+
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -240,11 +259,18 @@ admin.namespaces().setBacklogQuota(namespace, quota);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get backlog threshold and backlog retention policy
 
 You can see which size threshold and backlog retention policy has been applied to a namespace.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-backlog-quotas`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-backlog-quotas) subcommand and specify a namespace. Here's an example:
 
@@ -260,11 +286,13 @@ $ pulsar-admin namespaces get-backlog-quotas my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/backlogQuotaMap|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -273,11 +301,18 @@ Map<BacklogQuota.BacklogQuotaType,BacklogQuota> quotas =
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove backlog quotas
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace. Here's an example:
+Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace, and use the `-t`/`--type` flag to specify the backlog type to remove (the default is `destination_storage`). Here's an example:
 
 ```shell
 
@@ -285,11 +320,13 @@ $ pulsar-admin namespaces remove-backlog-quota my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/removeBacklogQuota?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -297,6 +334,10 @@ admin.namespaces().removeBacklogQuota(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Clear backlog
 
 #### pulsar-admin
@@ -319,7 +360,10 @@ By default, Pulsar stores all unacknowledged messages forever. This can lead to
 
 ### Set the TTL for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`set-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-set-message-ttl) subcommand and specify a namespace and a TTL (in seconds) using the `-ttl`/`--messageTTL` flag.
 
@@ -332,11 +376,13 @@ $ pulsar-admin namespaces set-message-ttl my-tenant/my-ns \
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/setNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -344,9 +390,16 @@ admin.namespaces().setNamespaceMessageTTL(namespace, ttlInSeconds);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-message-ttl) subcommand and specify a namespace.
 
@@ -359,11 +412,13 @@ $ pulsar-admin namespaces get-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/getNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -371,9 +426,16 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`remove-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-message-ttl) subcommand and specify a namespace.
 
@@ -385,11 +447,13 @@ $ pulsar-admin namespaces remove-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/removeNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -397,6 +461,10 @@ admin.namespaces().removeNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Delete messages from namespaces
 
 If you do not have any retention period and that you never have much of a backlog, the upper limit for retaining messages, which are acknowledged, equals to the Pulsar segment rollover period + entry log rollover period + (garbage collection interval * garbage collection ratios).
diff --git a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-tiered-storage.md b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-tiered-storage.md
index 8f6a7fb..f2ea50d 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/cookbooks-tiered-storage.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/cookbooks-tiered-storage.md
@@ -2,14 +2,15 @@
 id: cookbooks-tiered-storage
 title: Tiered Storage
 sidebar_label: "Tiered Storage"
-original_id: cookbooks-tiered-storage
 ---
 
 Pulsar's **Tiered Storage** feature allows older backlog data to be offloaded to long term storage, thereby freeing up space in BookKeeper and reducing storage costs. This cookbook walks you through using tiered storage in your Pulsar cluster.
 
-* Tiered storage uses [Apache jclouds](https://jclouds.apache.org) to support [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/)(GCS for short) for long term storage. With Jclouds, it is easy to add support for more [cloud storage providers](https://jclouds.apache.org/reference/providers/#blobstore-providers) in the future.
+* Tiered storage uses [Apache jclouds](https://jclouds.apache.org) to support [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/)(GCS for short)
+for long term storage. With Jclouds, it is easy to add support for more [cloud storage providers](https://jclouds.apache.org/reference/providers/#blobstore-providers) in the future.
 
-* Tiered storage uses [Apache Hadoop](http://hadoop.apache.org/) to support filesystem for long term storage. With Hadoop, it is easy to add support for more filesystem in the future.
+* Tiered storage uses [Apache Hadoop](http://hadoop.apache.org/) to support filesystem for long term storage. 
+With Hadoop, it is easy to add support for more filesystem in the future.
 
 ## When should I use Tiered Storage?
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/deploy-aws.md b/site2/website-next/versioned_docs/version-2.4.1/deploy-aws.md
index 6323051..2034749 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/deploy-aws.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/deploy-aws.md
@@ -2,7 +2,6 @@
 id: deploy-aws
 title: Deploying a Pulsar cluster on AWS using Terraform and Ansible
 sidebar_label: "Amazon Web Services"
-original_id: deploy-aws
 ---
 
 > For instructions on deploying a single Pulsar cluster manually rather than using Terraform and Ansible, see [Deploying a Pulsar cluster on bare metal](deploy-bare-metal.md). For instructions on manually deploying a multi-cluster Pulsar instance, see [Deploying a Pulsar instance on bare metal](deploy-bare-metal-multi-cluster).
@@ -148,7 +147,7 @@ Variable name | Description | Default
 When you run the Ansible playbook, the following AWS resources are used:
 
 * 9 total [Elastic Compute Cloud](https://aws.amazon.com/ec2) (EC2) instances running the [ami-9fa343e7](https://access.redhat.com/articles/3135091) Amazon Machine Image (AMI), which runs [Red Hat Enterprise Linux (RHEL) 7.4](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/7.4_release_notes/index). By default, that includes:
-  * 3 small VMs for ZooKeeper ([t2.small](https://www.ec2instances.info/?selected=t2.small) instances)
+  * 3 small VMs for ZooKeeper ([t3.small](https://www.ec2instances.info/?selected=t3.small) instances)
   * 3 larger VMs for BookKeeper [bookies](reference-terminology.md#bookie) ([i3.xlarge](https://www.ec2instances.info/?selected=i3.xlarge) instances)
   * 2 larger VMs for Pulsar [brokers](reference-terminology.md#broker) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
   * 1 larger VMs for Pulsar [proxy](reference-terminology.md#proxy) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
diff --git a/site2/website-next/versioned_docs/version-2.4.1/deploy-bare-metal-multi-cluster.md b/site2/website-next/versioned_docs/version-2.4.1/deploy-bare-metal-multi-cluster.md
index 643c122..9dd2526 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/deploy-bare-metal-multi-cluster.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/deploy-bare-metal-multi-cluster.md
@@ -2,38 +2,30 @@
 id: deploy-bare-metal-multi-cluster
 title: Deploying a multi-cluster on bare metal
 sidebar_label: "Bare metal multi-cluster"
-original_id: deploy-bare-metal-multi-cluster
 ---
 
 :::tip
 
-1. Single-cluster Pulsar installations should be sufficient for all but the most ambitious use cases. If you are interested in experimenting with
-Pulsar or using it in a startup or on a single team, you had better opt for a single cluster. For instructions on deploying a single cluster,
-see the guide [here](deploy-bare-metal).
-2. If you want to use all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you need to download `apache-pulsar-io-connectors`
-package and install `apache-pulsar-io-connectors` under `connectors` directory in the pulsar directory on every broker node or on every function-worker node if you
-run a separate cluster of function workers for [Pulsar Functions](functions-overview).
-3. If you want to use [Tiered Storage](concepts-tiered-storage) feature in your Pulsar deployment, you need to download `apache-pulsar-offloaders`
-package and install `apache-pulsar-offloaders` under `offloaders` directory in the pulsar directory on every broker node. For more details of how to configure
-this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
+1. You can use a single-cluster Pulsar installation in most use cases, such as experimenting with Pulsar or using Pulsar in a startup or in a single team. If you need to run a multi-cluster Pulsar instance, see the [guide](deploy-bare-metal-multi-cluster).
+2. If you want to use all built-in [Pulsar IO](io-overview.md) connectors, you need to download the `apache-pulsar-io-connectors` package and install `apache-pulsar-io-connectors` under the `connectors` directory in the pulsar directory on every broker node, or on every function-worker node if you have run a separate cluster of function workers for [Pulsar Functions](functions-overview).
+3. If you want to use the [Tiered Storage](concepts-tiered-storage.md) feature in your Pulsar deployment, you need to download the `apache-pulsar-offloaders` package and install `apache-pulsar-offloaders` under the `offloaders` directory in the Pulsar directory on every broker node. For more details on how to configure this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
 
 :::
 
-A Pulsar *instance* consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance involves the following basic steps:
+A Pulsar instance consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance consists of the following steps:
 
-* Deploying two separate [ZooKeeper](#deploy-zookeeper) quorums: a [local](#deploy-local-zookeeper) quorum for each cluster in the instance and a [configuration store](#configuration-store) quorum for instance-wide tasks
-* Initializing [cluster metadata](#cluster-metadata-initialization) for each cluster
-* Deploying a [BookKeeper cluster](#deploy-bookkeeper) of bookies in each Pulsar cluster
-* Deploying [brokers](#deploy-brokers) in each Pulsar cluster
+1. Deploying two separate ZooKeeper quorums: a local quorum for each cluster in the instance and a configuration store quorum for instance-wide tasks
+2. Initializing cluster metadata for each cluster
+3. Deploying a BookKeeper cluster of bookies in each Pulsar cluster
+4. Deploying brokers in each Pulsar cluster
 
-If you want to deploy a single Pulsar cluster, see [Clusters and Brokers](getting-started-standalone.md#start-the-cluster).
 
 > #### Run Pulsar locally or on Kubernetes?
-> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes on [Google Kubernetes Engine](deploy-kubernetes#pulsar [...]
+> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes, on Google Kubernetes Engine and on Amazon Web Services.
 
 ## System requirement
 
-Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. To use Pulsar, you need to install 64-bit JRE/JDK 8 or later versions.
+Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. You need to install 64-bit JRE/JDK 8 or later versions.
 
 :::note
 
@@ -68,8 +60,6 @@ $ cd apache-pulsar-@pulsar:version@
 
 ```
 
-## What your package contains
-
 The Pulsar binary package initially contains the following directories:
 
 Directory | Contains
@@ -93,17 +83,17 @@ Directory | Contains
 
 Each Pulsar instance relies on two separate ZooKeeper quorums.
 
-* [Local ZooKeeper](#deploy-local-zookeeper) operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs to have a dedicated ZooKeeper cluster.
-* [Configuration Store](#deploy-the-configuration-store) operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
+* Local ZooKeeper operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs a dedicated ZooKeeper cluster.
+* Configuration Store operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
 
-The configuration store quorum can be provided by an independent cluster of machines or by the same machines used by local ZooKeeper.
+You can use an independent cluster of machines or the same machines used by local ZooKeeper to provide the configuration store quorum.
 
 
 ### Deploy local ZooKeeper
 
 ZooKeeper manages a variety of essential coordination-related and configuration-related tasks for Pulsar.
 
-You need to stand up one local ZooKeeper cluster *per Pulsar cluster* for deploying a Pulsar instance. 
+You need to stand up one local ZooKeeper cluster per Pulsar cluster for deploying a Pulsar instance. 
 
 To begin, add all ZooKeeper servers to the quorum configuration specified in the [`conf/zookeeper.conf`](reference-configuration.md#zookeeper) file. Add a `server.N` line for each node in the cluster to the configuration, where `N` is the number of the ZooKeeper node. The following is an example for a three-node cluster:
 
@@ -117,7 +107,11 @@ server.3=zk3.us-west.example.com:2888:3888
 
 On each host, you need to specify the ID of the node in the `myid` file of each node, which is in `data/zookeeper` folder of each server by default (you can change the file location via the [`dataDir`](reference-configuration.md#zookeeper-dataDir) parameter).
 
-> See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+:::tip
+
+See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+
+:::
 
 On a ZooKeeper server at `zk1.us-west.example.com`, for example, you could set the `myid` value like this:
 
@@ -140,15 +134,15 @@ $ bin/pulsar-daemon start zookeeper
 
 ### Deploy the configuration store 
 
-The ZooKeeper cluster that is configured and started up in the section above is a *local* ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
+The ZooKeeper cluster configured and started up in the section above is a local ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
 
-If you deploy a [single-cluster](#single-cluster-pulsar-instance) instance, you do not need a separate cluster for the configuration store. If, however, you deploy a [multi-cluster](#multi-cluster-pulsar-instance) instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
+If you deploy a single-cluster instance, you do not need a separate cluster for the configuration store. If, however, you deploy a multi-cluster instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
 
 #### Single-cluster Pulsar instance
 
 If your Pulsar instance consists of just one cluster, then you can deploy a configuration store on the same machines as the local ZooKeeper quorum but run on different TCP ports.
 
-To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses to the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
+To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses. You need to use the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
 
 ```properties
 
@@ -165,13 +159,11 @@ As before, create the `myid` files for each server on `data/global-zookeeper/myi
 
 When you deploy a global Pulsar instance, with clusters distributed across different geographical regions, the configuration store serves as a highly available and strongly consistent metadata store that can tolerate failures and partitions spanning whole regions.
 
-The key here is to make sure the ZK quorum members are spread across at least 3 regions and that other regions run as observers.
+The key here is to make sure the ZK quorum members are spread across at least 3 regions, and other regions run as observers.
 
-Again, given the very low expected load on the configuration store servers, you can
-share the same hosts used for the local ZooKeeper quorum.
+Again, given the very low expected load on the configuration store servers, you can share the same hosts used for the local ZooKeeper quorum.
 
-For example, assume a Pulsar instance with the following clusters `us-west`,
-`us-east`, `us-central`, `eu-central`, `ap-south`. Also assume, each cluster has its own local ZK servers named such as the following: 
+For example, assume a Pulsar instance with the following clusters `us-west`, `us-east`, `us-central`, `eu-central`, `ap-south`. Also assume, each cluster has its own local ZK servers named such as the following: 
 
 ```
 
@@ -179,8 +171,7 @@ zk[1-3].${CLUSTER}.example.com
 
 ```
 
-In this scenario if you want to pick the quorum participants from few clusters and
-let all the others be ZK observers. For example, to form a 7 servers quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
+In this scenario, you can pick the quorum participants from a few clusters and let all the others be ZK observers. For example, to form a 7-server quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
 
 This method guarantees that writes to configuration store is possible even if one of these regions is unreachable.
 
@@ -227,7 +218,7 @@ $ bin/pulsar-daemon start configuration-store
 
 ## Cluster metadata initialization
 
-Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **you only needs to write these metadata once**.
+Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **You only need to write this metadata once**.
 
 You can initialize this metadata using the [`initialize-cluster-metadata`](reference-cli-tools.md#pulsar-initialize-cluster-metadata) command of the [`pulsar`](reference-cli-tools.md#pulsar) CLI tool. The following is an example:
 
@@ -260,7 +251,7 @@ Make sure to run `initialize-cluster-metadata` for each cluster in your instance
 
 BookKeeper provides [persistent message storage](concepts-architecture-overview.md#persistent-storage) for Pulsar.
 
-Each Pulsar broker needs to have its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
+Each Pulsar broker needs its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
 
 ### Configure bookies
 
@@ -280,7 +271,7 @@ $ bin/pulsar-daemon start bookie
 
 You can verify that the bookie works properly using the `bookiesanity` command for the [BookKeeper shell](reference-cli-tools.md#bookkeeper-shell):
 
-```shell
+```bash
 
 $ bin/bookkeeper shell bookiesanity
 
@@ -304,7 +295,7 @@ Bookie hosts are responsible for storing message data on disk. In order for book
 Message entries written to bookies are always synced to disk before returning an acknowledgement to the Pulsar broker. To ensure low write latency, BookKeeper is
 designed to use multiple devices:
 
-* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID)s controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
+* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID) controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
 * A **ledger storage device** is where data is stored until all consumers acknowledge the message. Writes happen in the background, so write I/O is not a big concern. Reads happen sequentially most of the time and the backlog is drained only in case of consumer drain. To store large amounts of data, a typical configuration involves multiple HDDs with a RAID controller.
 
 
@@ -371,39 +362,13 @@ $ bin/pulsar broker
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions [immediately below](#service-discovery-setup).
+[Clients](getting-started-clients) connecting to Pulsar brokers need to communicate with an entire Pulsar instance using a single URL.
 
-You can also use your own service discovery system if you want. If you use your own system, you only need to satisfy just one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
+You can use your own service discovery system. If you use your own system, you only need to satisfy just one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to some active brokers in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
-> #### Service discovery already provided by many scheduling systems
+> **Service discovery already provided by many scheduling systems**
 > Many large-scale deployment systems, such as [Kubernetes](deploy-kubernetes), have service discovery systems built in. If you run Pulsar on such a system, you may not need to provide your own service discovery mechanism.
 
-
-### Service discovery setup
-
-The service discovery mechanism that included with Pulsar maintains a list of active brokers, which stored in ZooKeeper, and supports lookup using HTTP and also the [binary protocol](developing-binary-protocol) of Pulsar.
-
-To get started setting up the built-in service of discovery of Pulsar, you need to change a few parameters in the [`conf/discovery.conf`](reference-configuration.md#service-discovery) configuration file. Set the [`zookeeperServers`](reference-configuration.md#service-discovery-zookeeperServers) parameter to the ZooKeeper quorum connection string of the cluster and the [`configurationStoreServers`](reference-configuration.md#service-discovery-configurationStoreServers) setting to the [con [...]
-store](reference-terminology.md#configuration-store) quorum connection string.
-
-```properties
-
-# Zookeeper quorum connection string
-zookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
-
-# Global configuration store connection string
-configurationStoreServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
-
-```
-
-To start the discovery service:
-
-```shell
-
-$ bin/pulsar-daemon start discovery
-
-```
-
 ## Admin client and verification
 
 At this point your Pulsar instance should be ready to use. You can now configure client machines that can serve as [administrative clients](admin-api-overview) for each cluster. You can use the [`conf/client.conf`](reference-configuration.md#client) configuration file to configure admin clients.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/develop-bare-metal.md b/site2/website-next/versioned_docs/version-2.4.1/deploy-bare-metal.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.2/develop-bare-metal.md
rename to site2/website-next/versioned_docs/version-2.4.1/deploy-bare-metal.md
diff --git a/site2/website-next/versioned_docs/version-2.4.1/deploy-dcos.md b/site2/website-next/versioned_docs/version-2.4.1/deploy-dcos.md
index f5f8d1f..07f446e 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/deploy-dcos.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/deploy-dcos.md
@@ -7,18 +7,17 @@ original_id: deploy-dcos
 
 :::tip
 
-If you want to enable all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you can choose to use `apachepulsar/pulsar-all` image instead of
-`apachepulsar/pulsar` image. `apachepulsar/pulsar-all` image has already bundled [all builtin connectors](io-overview.md#working-with-connectors).
+To enable all built-in [Pulsar IO](io-overview) connectors in your Pulsar deployment, we recommend you use the `apachepulsar/pulsar-all` image instead of the `apachepulsar/pulsar` image; the former has already bundled [all built-in connectors](io-overview.md#working-with-connectors).
 
 :::
 
-[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system used for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool that [Mesosphere](https://mesosphere.com/) creates and maintains .
+[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool created and maintained by [Mesosphere](https://mesosphere.com/).
 
 Apache Pulsar is available as a [Marathon Application Group](https://mesosphere.github.io/marathon/docs/application-groups.html), which runs multiple applications as manageable sets.
 
 ## Prerequisites
 
-In order to run Pulsar on DC/OS, you need the following:
+You need to prepare your environment before running Pulsar on DC/OS.
 
 * DC/OS version [1.9](https://docs.mesosphere.com/1.9/) or higher
 * A [DC/OS cluster](https://docs.mesosphere.com/1.9/installing/) with at least three agent nodes
@@ -37,7 +36,7 @@ Each node in the DC/OS-managed Mesos cluster must have at least:
 * 4 GB of memory
 * 60 GB of total persistent disk
 
-Alternatively, you can change the configuration in `PulsarGroups.json` according to match your resources of DC/OS cluster.
+Alternatively, you can change the configuration in `PulsarGroups.json` accordingly to match your resources of the DC/OS cluster.
 
 ## Deploy Pulsar using the DC/OS command interface
 
@@ -56,9 +55,9 @@ This command deploys Docker container instances in three groups, which together
 * 1 [Prometheus](http://prometheus.io/) instance and 1 [Grafana](https://grafana.com/) instance
 
 
-> When you run DC/OS, a ZooKeeper cluster already runs at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
+> When you run DC/OS, a ZooKeeper cluster will be running at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
 
-After executing the `dcos` command above, click on the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications in the process of deploying.
+After executing the `dcos` command above, click the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications during the deployment.
 
 ![DC/OS command executed](/assets/dcos_command_execute.png)
 
@@ -66,15 +65,15 @@ After executing the `dcos` command above, click on the **Services** tab in the D
 
 ## The BookKeeper group
 
-To monitor the status of the BookKeeper cluster deployment, click on the **bookkeeper** group in the parent **pulsar** group.
+To monitor the status of the BookKeeper cluster deployment, click the **bookkeeper** group in the parent **pulsar** group.
 
 ![DC/OS bookkeeper status](/assets/dcos_bookkeeper_status.png)
 
-At this point, 3 [bookies](reference-terminology.md#bookie) should be shown as green, which means that the bookies have been deployed successfully and are now running.
+At this point, the status of the 3 [bookies](reference-terminology.md#bookie) are green, which means that the bookies have been deployed successfully and are running.
  
 ![DC/OS bookkeeper running](/assets/dcos_bookkeeper_run.png)
  
-You can also click into each bookie instance to get more detailed information, such as the bookie running log.
+You can also click each bookie instance to get more detailed information, such as the bookie running log.
 
 ![DC/OS bookie log](/assets/dcos_bookie_log.png)
 
@@ -82,23 +81,23 @@ To display information about the BookKeeper in ZooKeeper, you can visit [http://
 
 ![DC/OS bookkeeper in zk](/assets/dcos_bookkeeper_in_zookeeper.png)
 
-## The Pulsar broker Group
+## The Pulsar broker group
 
-Similar to the BookKeeper group above, click into the **brokers** to check the status of the Pulsar brokers.
+Similar to the BookKeeper group above, click **brokers** to check the status of the Pulsar brokers.
 
 ![DC/OS broker status](/assets/dcos_broker_status.png)
 
 ![DC/OS broker running](/assets/dcos_broker_run.png)
 
-You can also click into each broker instance to get more detailed information, such as the broker running log.
+You can also click each broker instance to get more detailed information, such as the broker running log.
 
 ![DC/OS broker log](/assets/dcos_broker_log.png)
 
-Broker cluster information in Zookeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
+Broker cluster information in ZooKeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
 
 ![DC/OS broker in zk](/assets/dcos_broker_in_zookeeper.png)
 
-## Monitor Group
+## Monitor group
 
 The **monitory** group consists of Prometheus and Grafana.
 
@@ -106,17 +105,17 @@ The **monitory** group consists of Prometheus and Grafana.
 
 ### Prometheus
 
-Click into the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
+Click the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
 
 ![DC/OS prom endpoint](/assets/dcos_prom_endpoint.png)
 
-If you click that endpoint, you can see the Prometheus dashboard. The [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets) URL display all the bookies and brokers.
+If you click that endpoint, you can see the Prometheus dashboard. All the bookies and brokers are listed on [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets).
 
 ![DC/OS prom targets](/assets/dcos_prom_targets.png)
 
 ### Grafana
 
-Click into `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
+Click `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
  
 ![DC/OS grafana endpoint](/assets/dcos_grafana_endpoint.png)
 
@@ -130,7 +129,7 @@ Now that you have a fully deployed Pulsar cluster, you can run a simple consumer
 
 ### Download and prepare the Pulsar Java tutorial
 
-You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file of the repo).
+You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file in this repo).
 
 ```bash
 
@@ -138,12 +137,13 @@ $ git clone https://github.com/streamlio/pulsar-java-tutorial
 
 ```
 
-Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java).
-The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent, which runs a broker. The client agent IP address can also replace this.
+Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) file and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file.
 
-Now, change the message number from 10 to 10000000 in main method of [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) so that it can produce more messages.
+The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent that runs a broker, and you can replace it with the client agent IP address.
 
-Now compile the project code using the command below:
+Now, you can change the message number from 10 to 10000000 in the main method in [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file to produce more messages.
+
+Then, you can compile the project code using the command below:
 
 ```bash
 
@@ -169,7 +169,7 @@ $ mvn exec:java -Dexec.mainClass="tutorial.ProducerTutorial"
 
 ```
 
-You can see the producer producing messages and the consumer consuming messages through the DC/OS GUI.
+You see that the producer is producing messages and the consumer is consuming messages through the DC/OS GUI.
 
 ![DC/OS pulsar producer](/assets/dcos_producer.png)
 
@@ -177,20 +177,20 @@ You can see the producer producing messages and the consumer consuming messages
 
 ### View Grafana metric output
 
-While the producer and consumer run, you can access running metrics information from Grafana.
+While the producer and consumer are running, you can access the running metrics from Grafana.
 
 ![DC/OS pulsar dashboard](/assets/dcos_metrics.png)
 
 
 ## Uninstall Pulsar
 
-You can shut down and uninstall the `pulsar` application from DC/OS at any time in the following two ways:
+You can shut down and uninstall the `pulsar` application from DC/OS at any time in one of the following two ways:
 
-1. Using the DC/OS GUI, you can choose **Delete** at the right end of Pulsar group.
+1. Click the three dots at the right end of Pulsar group and choose **Delete** on the DC/OS GUI.
 
    ![DC/OS pulsar uninstall](/assets/dcos_uninstall.png)
 
-2. You can use the following command:
+2. Use the command below.
 
    ```bash
    
diff --git a/site2/website-next/versioned_docs/version-2.4.1/deploy-kubernetes.md b/site2/website-next/versioned_docs/version-2.4.1/deploy-kubernetes.md
index dc7123d..4e170dc 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/deploy-kubernetes.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/deploy-kubernetes.md
@@ -2,7 +2,6 @@
 id: deploy-kubernetes
 title: Deploy Pulsar on Kubernetes
 sidebar_label: "Kubernetes"
-original_id: deploy-kubernetes
 ---
 
 To get up and running with these charts as fast as possible, in a **non-production** use case, we provide
diff --git a/site2/website-next/versioned_docs/version-2.4.1/deploy-monitoring.md b/site2/website-next/versioned_docs/version-2.4.1/deploy-monitoring.md
index 074ce3f..95ccdd6 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/deploy-monitoring.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/deploy-monitoring.md
@@ -2,7 +2,6 @@
 id: deploy-monitoring
 title: Monitor
 sidebar_label: "Monitor"
-original_id: deploy-monitoring
 ---
 
 You can use different ways to monitor a Pulsar cluster, exposing both metrics related to the usage of topics and the overall health of the individual components of the cluster.
@@ -127,17 +126,7 @@ The per-topic dashboard instructions are available at [Pulsar manager](administr
 
 You can use grafana to create dashboard driven by the data that is stored in Prometheus.
 
-When you deploy Pulsar on Kubernetes, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
-
-Enter the command below to use the dashboard manually:
-
-```shell
-
-docker run -p3000:3000 \
-        -e PROMETHEUS_URL=http://$PROMETHEUS_HOST:9090/ \
-        apachepulsar/pulsar-grafana:latest
-
-```
+When you deploy Pulsar on Kubernetes with the Pulsar Helm Chart, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
 
 The following are some Grafana dashboards examples:
 
@@ -145,4 +134,4 @@ The following are some Grafana dashboards examples:
 - [apache-pulsar-grafana-dashboard](https://github.com/streamnative/apache-pulsar-grafana-dashboard): a collection of Grafana dashboard templates for different Pulsar components running on both Kubernetes and on-premise machines.
 
 ## Alerting rules
-You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.1/develop-binary-protocol.md b/site2/website-next/versioned_docs/version-2.4.1/develop-binary-protocol.md
index b233f10..fa03383 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/develop-binary-protocol.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/develop-binary-protocol.md
@@ -2,7 +2,6 @@
 id: develop-binary-protocol
 title: Pulsar binary protocol specification
 sidebar_label: "Binary protocol"
-original_id: develop-binary-protocol
 ---
 
 Pulsar uses a custom binary protocol for communications between producers/consumers and brokers. This protocol is designed to support required features, such as acknowledgements and flow control, while ensuring maximum transport and implementation efficiency.
@@ -29,42 +28,63 @@ The Pulsar protocol allows for two types of commands:
 
 Simple (payload-free) commands have this basic structure:
 
-| Component   | Description                                                                             | Size (in bytes) |
-|:------------|:----------------------------------------------------------------------------------------|:----------------|
-| totalSize   | The size of the frame, counting everything that comes after it (in bytes)               | 4               |
-| commandSize | The size of the protobuf-serialized command                                             | 4               |
-| message     | The protobuf message serialized in a raw binary format (rather than in protobuf format) |                 |
+| Component     | Description                                                                             | Size (in bytes) |
+|:--------------|:----------------------------------------------------------------------------------------|:----------------|
+| `totalSize`   | The size of the frame, counting everything that comes after it (in bytes)               | 4               |
+| `commandSize` | The size of the protobuf-serialized command                                             | 4               |
+| `message`     | The protobuf message serialized in a raw binary format (rather than in protobuf format) |                 |
 
 ### Payload commands
 
 Payload commands have this basic structure:
 
-| Component    | Description                                                                                 | Size (in bytes) |
-|:-------------|:--------------------------------------------------------------------------------------------|:----------------|
-| totalSize    | The size of the frame, counting everything that comes after it (in bytes)                   | 4               |
-| commandSize  | The size of the protobuf-serialized command                                                 | 4               |
-| message      | The protobuf message serialized in a raw binary format (rather than in protobuf format)     |                 |
-| magicNumber  | A 2-byte byte array (`0x0e01`) identifying the current format                               | 2               |
-| checksum     | A [CRC32-C checksum](http://www.evanjones.ca/crc32c.html) of everything that comes after it | 4               |
-| metadataSize | The size of the message [metadata](#message-metadata)                                       | 4               |
-| metadata     | The message [metadata](#message-metadata) stored as a binary protobuf message               |                 |
-| payload      | Anything left in the frame is considered the payload and can include any sequence of bytes  |                 |
+| Component                          | Required or optional| Description                                                                                 | Size (in bytes) |
+|:-----------------------------------|:----------|:--------------------------------------------------------------------------------------------|:----------------|
+| `totalSize`                        | Required  | The size of the frame, counting everything that comes after it (in bytes)                   | 4               |
+| `commandSize`                      | Required  | The size of the protobuf-serialized command                                                 | 4               |
+| `message`                          | Required  | The protobuf message serialized in a raw binary format (rather than in protobuf format)     |                 |
+| `magicNumberOfBrokerEntryMetadata` | Optional  | A 2-byte byte array (`0x0e02`) identifying the broker entry metadata   <br /> **Note**: `magicNumberOfBrokerEntryMetadata` , `brokerEntryMetadataSize`, and `brokerEntryMetadata` should be used **together**.                     | 2               |
+| `brokerEntryMetadataSize`          | Optional  | The size of the broker entry metadata                                                       | 4               |
+| `brokerEntryMetadata`              | Optional  | The broker entry metadata stored as a binary protobuf message                               |                 |
+| `magicNumber`                      | Required  | A 2-byte byte array (`0x0e01`) identifying the current format                               | 2               |
+| `checksum`                         | Required  | A [CRC32-C checksum](http://www.evanjones.ca/crc32c.html) of everything that comes after it | 4               |
+| `metadataSize`                     | Required  | The size of the message [metadata](#message-metadata)                                       | 4               |
+| `metadata`                         | Required  | The message [metadata](#message-metadata) stored as a binary protobuf message               |                 |
+| `payload`                          | Required  | Anything left in the frame is considered the payload and can include any sequence of bytes  |                 |
+
+## Broker entry metadata
+
+Broker entry metadata is stored alongside the message metadata as a serialized protobuf message.
+It is created by the broker when the message arrives at the broker and is passed without changes to the consumer if configured.
+
+| Field              | Required or optional       | Description                                                                                                                   |
+|:-------------------|:----------------|:------------------------------------------------------------------------------------------------------------------------------|
+| `broker_timestamp` | Optional        | The timestamp when a message arrives at the broker (i.e. the number of milliseconds since January 1st, 1970 in UTC)      |
+| `index`            | Optional        | The index of the message. It is assigned by the broker.                                                                 |
+
+If you want to use broker entry metadata for **brokers**, configure the [`brokerEntryMetadataInterceptors`](reference-configuration.md#broker) parameter in the `broker.conf` file.
+
+If you want to use broker entry metadata for **consumers**:
+
+1. Use the client protocol version [18 or later](https://github.com/apache/pulsar/blob/ca37e67211feda4f7e0984e6414e707f1c1dfd07/pulsar-common/src/main/proto/PulsarApi.proto#L259).
+   
+2. Configure the [`brokerEntryMetadataInterceptors`](reference-configuration.md#broker) parameter and set the [`enableExposingBrokerEntryMetadataToClient`](reference-configuration.md#broker) parameter to `true` in the `broker.conf` file.
 
 ## Message metadata
 
-Message metadata is stored alongside the application-specified payload as a serialized protobuf message. Metadata is created by the producer and passed on unchanged to the consumer.
+Message metadata is stored alongside the application-specified payload as a serialized protobuf message. Metadata is created by the producer and passed without changes to the consumer.
 
-| Field                                | Description                                                                                                                                                                                                                                               |
-|:-------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `producer_name`                      | The name of the producer that published the message                                                                                                                                                                                         |
-| `sequence_id`                        | The sequence ID of the message, assigned by producer                                                                                                                                                                                        |
-| `publish_time`                       | The publish timestamp in Unix time (i.e. as the number of milliseconds since January 1st, 1970 in UTC)                                                                                                                                                    |
-| `properties`                         | A sequence of key/value pairs (using the [`KeyValue`](https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/proto/PulsarApi.proto#L32) message). These are application-defined keys and values with no special meaning to Pulsar. |
-| `replicated_from` *(optional)*       | Indicates that the message has been replicated and specifies the name of the [cluster](reference-terminology.md#cluster) where the message was originally published                                                                                                             |
-| `partition_key` *(optional)*         | While publishing on a partition topic, if the key is present, the hash of the key is used to determine which partition to choose                                                                                                                          |
-| `compression` *(optional)*           | Signals that payload has been compressed and with which compression library                                                                                                                                                                               |
-| `uncompressed_size` *(optional)*     | If compression is used, the producer must fill the uncompressed size field with the original payload size                                                                                                                                                 |
-| `num_messages_in_batch` *(optional)* | If this message is really a [batch](#batch-messages) of multiple entries, this field must be set to the number of messages in the batch                                                                                                                   |
+| Field                    | Required or optional | Description                                                                                                                                                                                                                                               |
+|:-------------------------|:----------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `producer_name`          | Required  | The name of the producer that published the message                                                                                                                                                                                         |
+| `sequence_id`            | Required  | The sequence ID of the message, assigned by producer                                                                                                                                                                                        |
+| `publish_time`           | Required  | The publish timestamp in Unix time (i.e. as the number of milliseconds since January 1st, 1970 in UTC)                                                                                                                                                    |
+| `properties`             | Required  | A sequence of key/value pairs (using the [`KeyValue`](https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/proto/PulsarApi.proto#L32) message). These are application-defined keys and values with no special meaning to Pulsar. |
+| `replicated_from`        | Optional  |  Indicates that the message has been replicated and specifies the name of the [cluster](reference-terminology.md#cluster) where the message was originally published                                                                                                             |
+| `partition_key`          | Optional  | While publishing on a partition topic, if the key is present, the hash of the key is used to determine which partition to choose. Partition key is used as the message key.                                                                                                                          |
+| `compression`            | Optional  | Signals that payload has been compressed and with which compression library                                                                                                                                                                               |
+| `uncompressed_size`      | Optional  | If compression is used, the producer must fill the uncompressed size field with the original payload size                                                                                                                                                 |
+| `num_messages_in_batch`  | Optional  | If this message is really a [batch](#batch-messages) of multiple entries, this field must be set to the number of messages in the batch                                                                                                                   |
 
 ### Batch messages
 
@@ -76,19 +96,19 @@ object.
 For a single batch, the payload format will look like this:
 
 
-| Field         | Description                                                 |
-|:--------------|:------------------------------------------------------------|
-| metadataSizeN | The size of the single message metadata serialized Protobuf |
-| metadataN     | Single message metadata                                     |
-| payloadN      | Message payload passed by application                       |
+| Field           | Required or optional | Description                                                |
+|:----------------|:---------------------|:-----------------------------------------------------------|
+| `metadataSizeN` | Required             |The size of the single message metadata serialized Protobuf |
+| `metadataN`     | Required             |Single message metadata                                     |
+| `payloadN`      | Required             |Message payload passed by application                       |
 
 Each metadata field looks like this;
 
-| Field                      | Description                                             |
-|:---------------------------|:--------------------------------------------------------|
-| properties                 | Application-defined properties                          |
-| partition key *(optional)* | Key to indicate the hashing to a particular partition   |
-| payload_size               | Size of the payload for the single message in the batch |
+| Field           | Required or optional  | Description                                             |
+|:----------------|:----------------------|:--------------------------------------------------------|
+| `properties`    | Required              | Application-defined properties                          |
+| `partition key` | Optional              | Key to indicate the hashing to a particular partition   |
+| `payload_size`  | Required              | Size of the payload for the single message in the batch |
 
 When compression is enabled, the whole batch will be compressed at once.
 
@@ -170,6 +190,10 @@ messages to the broker, referring to the producer id negotiated before.
 
 ![Producer interaction](/assets/binary-protocol-producer.png)
 
+If the client does not receive a response indicating producer creation success or failure,
+the client should first send a command to close the original producer before sending a
+command to re-attempt producer creation.
+
 ##### Command Producer
 
 ```protobuf
@@ -273,6 +297,11 @@ When receiving a `CloseProducer` command, the broker will stop accepting any
 more messages for the producer, wait until all pending messages are persisted
 and then reply `Success` to the client.
 
+If the client does not receive a response to a `Producer` command within a timeout,
+the client must first send a `CloseProducer` command before sending another
+`Producer` command. The client does not need to await a response to the `CloseProducer`
+command before sending the next `Producer` command.
+
 The broker can send a `CloseProducer` command to client when it's performing
 a graceful failover (eg: broker is being restarted, or the topic is being unloaded
 by load balancer to be transferred to a different broker).
diff --git a/site2/website-next/versioned_docs/version-2.4.1/develop-load-manager.md b/site2/website-next/versioned_docs/version-2.4.1/develop-load-manager.md
index 509209b..9687f30 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/develop-load-manager.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/develop-load-manager.md
@@ -2,7 +2,6 @@
 id: develop-load-manager
 title: Modular load manager
 sidebar_label: "Modular load manager"
-original_id: develop-load-manager
 ---
 
 The *modular load manager*, implemented in  [`ModularLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java), is a flexible alternative to the previously implemented load manager, [`SimpleLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java), which attempts to simplify how load  [...]
diff --git a/site2/website-next/versioned_docs/version-2.4.1/develop-tools.md b/site2/website-next/versioned_docs/version-2.4.1/develop-tools.md
index b545779..d034926 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/develop-tools.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/develop-tools.md
@@ -2,7 +2,6 @@
 id: develop-tools
 title: Simulation tools
 sidebar_label: "Simulation tools"
-original_id: develop-tools
 ---
 
 It is sometimes necessary create an test environment and incur artificial load to observe how well load managers
diff --git a/site2/website-next/versioned_docs/version-2.4.1/io-cdc.md b/site2/website-next/versioned_docs/version-2.4.1/io-cdc.md
index 20f16ae..df37397 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/io-cdc.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/io-cdc.md
@@ -2,7 +2,6 @@
 id: io-cdc
 title: CDC connector
 sidebar_label: "CDC connector"
-original_id: io-cdc
 ---
 
 CDC source connectors capture log changes of databases (such as MySQL, MongoDB, and PostgreSQL) into Pulsar.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/io-overview.md b/site2/website-next/versioned_docs/version-2.4.1/io-overview.md
index 68960a8..810de78 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/io-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/io-overview.md
@@ -2,7 +2,6 @@
 id: io-overview
 title: Pulsar connector overview
 sidebar_label: "Overview"
-original_id: io-overview
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.1/io-quickstart.md b/site2/website-next/versioned_docs/version-2.4.1/io-quickstart.md
index acab1eb..10a0380 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/io-quickstart.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/io-quickstart.md
@@ -522,7 +522,7 @@ Now that we have a MySQL running locally. In this section, we will configure a J
    
    To run a JDBC sink connector, you need to prepare a yaml config file including the information that Pulsar IO runtime needs to know. For example, how Pulsar IO can find the MySQL cluster, what is the JDBCURL and the table that Pulsar IO will use for writing messages to.
 
-   Create a _pulsar-mysql-jdbc-sink.yaml_ file, copy the following contents to this file, and place the file in the `pulsar/connectors` folder.
+   Create a _pulsar-mysql-jdbc-sink.yaml_ file , copy the following contents to this file, and place the file in the `pulsar/connectors` folder.
 
    ```text
    
diff --git a/site2/website-next/versioned_docs/version-2.4.1/pulsar-2.0.md b/site2/website-next/versioned_docs/version-2.4.1/pulsar-2.0.md
index 11c5e66..560c8c1 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/pulsar-2.0.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/pulsar-2.0.md
@@ -2,7 +2,6 @@
 id: pulsar-2.0
 title: Pulsar 2.0
 sidebar_label: "Pulsar 2.0"
-original_id: pulsar-2.0
 ---
 
 Pulsar 2.0 is a major new release for Pulsar that brings some bold changes to the platform, including [simplified topic names](#topic-names), the addition of the [Pulsar Functions](functions-overview) feature, some terminology changes, and more.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/reference-pulsar-admin.md b/site2/website-next/versioned_docs/version-2.4.1/pulsar-admin.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.1/reference-pulsar-admin.md
rename to site2/website-next/versioned_docs/version-2.4.1/pulsar-admin.md
diff --git a/site2/website-next/versioned_docs/version-2.4.1/reference-cli-tools.md b/site2/website-next/versioned_docs/version-2.4.1/reference-cli-tools.md
index 8c2c64f..0c8aea1 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/reference-cli-tools.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/reference-cli-tools.md
@@ -2,7 +2,6 @@
 id: reference-cli-tools
 title: Pulsar command-line tools
 sidebar_label: "Pulsar CLI tools"
-original_id: reference-cli-tools
 ---
 
 Pulsar offers several command-line tools that you can use for managing Pulsar installations, performance testing, using command-line producers and consumers, and more.
@@ -16,8 +15,12 @@ All Pulsar command-line tools can be run from the `bin` directory of your [insta
 * [`bookkeeper`](#bookkeeper)
 * [`broker-tool`](#broker-tool)
 
-> ### Getting help
-> You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> **Important** 
+>
+> - This page only shows **some frequently used commands**. For the latest information about `pulsar`, `pulsar-client`, and `pulsar-perf`, including commands, flags, descriptions, and more information, see [Pulsar tools](https://pulsar.apache.org/tools/).
+>  
+> - You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> 
 
 > ```shell
 > 
@@ -45,7 +48,6 @@ Commands:
 * `bookie`
 * `broker`
 * `compact-topic`
-* `discovery`
 * `configuration-store`
 * `initialize-cluster-metadata`
 * `proxy`
@@ -53,6 +55,7 @@ Commands:
 * `websocket`
 * `zookeeper`
 * `zookeeper-shell`
+* `autorecovery`
 
 Example:
 
@@ -71,14 +74,13 @@ The table below lists the environment variables that you can use to configure th
 |`PULSAR_BOOKKEEPER_CONF`|description: Configuration file for bookie|`conf/bookkeeper.conf`|
 |`PULSAR_ZK_CONF`|Configuration file for zookeeper|`conf/zookeeper.conf`|
 |`PULSAR_CONFIGURATION_STORE_CONF`|Configuration file for the configuration store|`conf/global_zookeeper.conf`|
-|`PULSAR_DISCOVERY_CONF`|Configuration file for discovery service|`conf/discovery.conf`|
 |`PULSAR_WEBSOCKET_CONF`|Configuration file for websocket proxy|`conf/websocket.conf`|
 |`PULSAR_STANDALONE_CONF`|Configuration file for standalone|`conf/standalone.conf`|
 |`PULSAR_EXTRA_OPTS`|Extra options to be passed to the jvm||
 |`PULSAR_EXTRA_CLASSPATH`|Extra paths for Pulsar's classpath||
 |`PULSAR_PID_DIR`|Folder where the pulsar server PID file should be stored||
 |`PULSAR_STOP_TIMEOUT`|Wait time before forcefully killing the Bookie server instance if attempts to stop it are not successful||
-
+|`PULSAR_GC_LOG`|GC options to be passed to the JVM||
 
 
 ### `bookie`
@@ -165,26 +167,6 @@ $ pulsar compact-topic --topic topic-to-compact
 
 ```
 
-### `discovery`
-
-Run a discovery server
-
-Usage
-
-```bash
-
-$ pulsar discovery
-
-```
-
-Example
-
-```bash
-
-$ PULSAR_DISCOVERY_CONF=/path/to/discovery.conf pulsar discovery
-
-```
-
 ### `configuration-store`
 
 Starts up the Pulsar configuration store
@@ -224,14 +206,14 @@ Options
 |`-ub` , `--broker-service-url`|The broker service URL for the new cluster||
 |`-tb` , `--broker-service-url-tls`|The broker service URL for the new cluster with TLS encryption||
 |`-c` , `--cluster`|Cluster name||
-|`-cs` , `--configuration-store`|The configuration store quorum connection string||
+|`-cms` , `--configuration-metadata-store`|The configuration metadata store quorum connection string||
 |`--existing-bk-metadata-service-uri`|The metadata service URI of the existing BookKeeper cluster that you want to use||
 |`-h` , `--help`|Cluster name|false|
 |`--initial-num-stream-storage-containers`|The number of storage containers of BookKeeper stream storage|16|
 |`--initial-num-transaction-coordinators`|The number of transaction coordinators assigned in a cluster|16|
 |`-uw` , `--web-service-url`|The web service URL for the new cluster||
 |`-tw` , `--web-service-url-tls`|The web service URL for the new cluster with TLS encryption||
-|`-zk` , `--zookeeper`|The local ZooKeeper quorum connection string||
+|`-md` , `--metadata-store`|The metadata store service url||
 |`--zookeeper-session-timeout-ms`|The local ZooKeeper session timeout. The time unit is in millisecond(ms)|30000|
 
 
@@ -355,6 +337,23 @@ Options
 |`-c`, `--conf`|Configuration file for ZooKeeper||
 |`-server`|Configuration zk address, eg: `127.0.0.1:2181`||
 
+### `autorecovery`
+
+Runs an auto-recovery service.
+
+Usage
+
+```bash
+
+$ pulsar autorecovery options
+
+```
+
+Options
+
+|Flag|Description|Default|
+|---|---|---|
+|`-c`, `--conf`|Configuration for the autorecovery|N/A|
 
 
 ## `pulsar-client`
@@ -407,6 +406,7 @@ Options
 |`-m`, `--messages`|Comma-separated string of messages to send; either -m or -f must be specified|[]|
 |`-n`, `--num-produce`|The number of times to send the message(s); the count of messages/files * num-produce should be below 1000|1|
 |`-r`, `--rate`|Rate (in messages per second) at which to produce; a value 0 means to produce messages as fast as possible|0.0|
+|`-db`, `--disable-batching`|Disable batch sending of messages|false|
 |`-c`, `--chunking`|Split the message and publish in chunks if the message size is larger than the allowed max size|false|
 |`-s`, `--separator`|Character to split messages string with.|","|
 |`-k`, `--key`|Message key to add|key=value string, like k1=v1,k2=v2.|
@@ -462,6 +462,7 @@ $ pulsar-daemon command
 Commands
 * `start`
 * `stop`
+* `restart`
 
 
 ### `start`
@@ -492,7 +493,14 @@ Options
 |---|---|---|
 |-force|Stop the service forcefully if not stopped by normal shutdown.|false|
 
+### `restart`
+Restart a service that has already been started.
 
+```bash
+
+$ pulsar-daemon restart service
+
+```
 
 ## `pulsar-perf`
 A tool for performance testing a Pulsar broker.
@@ -514,6 +522,7 @@ Commands
 * `monitor-brokers`
 * `simulation-client`
 * `simulation-controller`
+* `transaction`
 * `help`
 
 Environment variables
@@ -526,6 +535,7 @@ The table below lists the environment variables that you can use to configure th
 |`PULSAR_CLIENT_CONF`|Configuration file for the client|conf/client.conf|
 |`PULSAR_EXTRA_OPTS`|Extra options to be passed to the JVM||
 |`PULSAR_EXTRA_CLASSPATH`|Extra paths for Pulsar's classpath||
+|`PULSAR_GC_LOG`|Gc options to be passed to the jvm||
 
 
 ### `consume`
@@ -544,7 +554,7 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`-ac`, `--auto_ack_chunk_q_full`|Auto ack for the oldest message in consumer's receiver queue if the queue full|false|
 |`--listener-name`|Listener name for the broker||
 |`--acks-delay-millis`|Acknowledgements grouping delay in millis|100|
@@ -553,11 +563,13 @@ Options
 |`-v`, `--encryption-key-value-file`|The file which contains the private key to decrypt payload||
 |`-h`, `--help`|Help message|false|
 |`--conf-file`|Configuration file||
+|`-m`, `--num-messages`|Number of messages to consume in total. If the value is equal to or smaller than 0, it keeps consuming messages.|0|
 |`-e`, `--expire_time_incomplete_chunked_messages`|The expiration time for incomplete chunk messages (in milliseconds)|0|
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-mc`, `--max_chunked_msg`|Max pending chunk messages|0|
 |`-n`, `--num-consumers`|Number of consumers (per topic)|1|
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
+|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1|
 |`-ns`, `--num-subscriptions`|Number of subscriptions (per topic)|1|
 |`-t`, `--num-topics`|The number of topics|1|
 |`-pm`, `--pool-messages`|Use the pooled message|true|
@@ -571,10 +583,21 @@ Options
 |`-ss`, `--subscriptions`|A list of subscriptions to consume on (e.g. sub1,sub2)|sub|
 |`-st`, `--subscription-type`|Subscriber type. Possible values are Exclusive, Shared, Failover, Key_Shared.|Exclusive|
 |`-sp`, `--subscription-position`|Subscriber position. Possible values are Latest, Earliest.|Latest|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps consuming messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps consuming messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--tls-allow-insecure`|Allow insecure TLS connection||
 
+Below are **transaction** related options.
+
+If you want `--txn-timeout`, `--numMessage-perTransaction` (`-nmt`), `-ntxn`, or `-abort` to take effect, set `--txn-enable` to true.
+
+|Flag|Description|Default|
+|---|---|---|
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). |10
+`-nmt`, `--numMessage-perTransaction`|The number of messages acknowledged by a transaction. |50
+`-txn`, `--txn-enable`|Enable or disable a transaction.|false
+`-ntxn`|The number of opened transactions. 0 means the number of transactions is unlimited. |0
+`-abort`|Abort a transaction. |true
 
 ### `produce`
 Run a producer
@@ -594,7 +617,7 @@ Options
 |`-am`, `--access-mode`|Producer access mode. Valid values are `Shared`, `Exclusive` and `WaitForExclusive`|Shared|
 |`-au`, `--admin-url`|Pulsar admin URL||
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--listener-name`|Listener name for the broker||
 |`-b`, `--batch-time-window`|Batch messages in a window of the specified number of milliseconds|1|
 |`-bb`, `--batch-max-bytes`|Maximum number of bytes per batch|4194304|
@@ -613,9 +636,9 @@ Options
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-o`, `--max-outstanding`|Max number of outstanding messages|1000|
 |`-p`, `--max-outstanding-across-partitions`|Max number of outstanding messages across partitions|50000|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-mk`, `--message-key-generation-mode`|The generation mode of message key. Valid options are `autoIncrement`, `random`||
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages.|0|
 |`-n`, `--num-producers`|The number of producers (per topic)|1|
 |`-threads`, `--num-test-threads`|Number of test threads|1|
 |`-t`, `--num-topic`|The number of topics|1|
@@ -629,11 +652,21 @@ Options
 |`-u`, `--service-url`|Pulsar service URL||
 |`-s`, `--size`|Message size (in bytes)|1024|
 |`-i`, `--stats-interval-seconds`|Statistics interval seconds. If 0, statistics will be disabled.|0|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages.|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--warmup-time`|Warm-up time in seconds|1|
 |`--tls-allow-insecure`|Allow insecure TLS connection||
 
+Below are **transaction** related options.
+
+If you want `--txn-timeout`, `--numMessage-perTransaction`, or `-abort` to take effect, set `--txn-enable` to true.
+
+|Flag|Description|Default|
+|---|---|---|
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). |5
+`-nmt`, `--numMessage-perTransaction`|The number of messages acknowledged by a transaction. |50
+`-txn`, `--txn-enable`|Enable or disable a transaction.|true
+`-abort`|Abort a transaction. |true
 
 ### `read`
 Run a topic reader
@@ -651,19 +684,21 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--listener-name`|Listener name for the broker||
 |`--conf-file`|Configuration file||
 |`-h`, `--help`|Help message|false|
+|`-n`, `--num-messages`|Number of messages to consume in total. If the value is equal to or smaller than 0, it keeps consuming messages.|0|
 |`-c`, `--max-connections`|Max number of TCP connections to a single broker|100|
 |`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers|1|
+|`-lt`, `--num-listener-threads`|Set the number of threads to be used for message listeners|1|
 |`-t`, `--num-topics`|The number of topics|1|
 |`-r`, `--rate`|Simulate a slow message reader (rate in msg/s)|0|
 |`-q`, `--receiver-queue-size`|Size of the receiver queue|1000|
 |`-u`, `--service-url`|Pulsar service URL||
 |`-m`, `--start-message-id`|Start message id. This can be either 'earliest', 'latest' or a specific message id by using 'lid:eid'|earliest|
 |`-i`, `--stats-interval-seconds`|Statistics interval seconds. If 0, statistics will be disabled.|0|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps consuming messages.|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps consuming messages.|0|
 |`--trust-cert-file`|Path for the trusted TLS certificate file||
 |`--use-tls`|Use TLS encryption on the connection|false|
 |`--tls-allow-insecure`|Allow insecure TLS connection||
@@ -684,16 +719,19 @@ Options
 |Flag|Description|Default|
 |---|---|---|
 |`--auth-params`|Authentication parameters, whose format is determined by the implementation of method `configure` in authentication plugin class. For example, `key1:val1,key2:val2` or `{"key1":"val1","key2":"val2"}`.||
-|`--auth_plugin`|Authentication plugin class name||
+|`--auth-plugin`|Authentication plugin class name||
 |`--conf-file`|Configuration file||
 |`-h`, `--help`|Help message|false|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-t`, `--num-topic`|The number of topics|1|
 |`-f`, `--payload-file`|Use payload from a file instead of empty buffer||
+|`-e`, `--payload-delimiter`|The delimiter used to split lines when using payload from a file|\n|
+|`-fp`, `--format-payload`|Format %i as a message index in the stream from producer and/or %t as the timestamp nanoseconds|false|
+|`-fc`, `--format-class`|Custom formatter class name|`org.apache.pulsar.testclient.DefaultMessageFormatter`|
 |`-u`, `--proxy-url`|Pulsar Proxy URL, e.g., "ws://localhost:8080/"||
 |`-r`, `--rate`|Publish rate msg/s across topics|100|
 |`-s`, `--size`|Message size in byte|1024|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 
 
 ### `managed-ledger`
@@ -717,11 +755,11 @@ Options
 |`-h`, `--help`|Help message|false|
 |`-c`, `--max-connections`|Max number of TCP connections to a single bookie|1|
 |`-o`, `--max-outstanding`|Max number of outstanding requests|1000|
-|`-m`, `--num-messages`|Number of messages to publish in total. If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-m`, `--num-messages`|Number of messages to publish in total. If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`-t`, `--num-topic`|Number of managed ledgers|1|
 |`-r`, `--rate`|Write rate msg/s across managed ledgers|100|
 |`-s`, `--size`|Message size in byte|1024|
-|`-time`, `--test-duration`|Test duration (in seconds). If the value is 0 or smaller than 0, it keeps publishing messages|0|
+|`-time`, `--test-duration`|Test duration (in seconds). If this value is less than or equal to 0, it keeps publishing messages.|0|
 |`--threads`|Number of threads writing|1|
 |`-w`, `--write-quorum`|Ledger write quorum|1|
 |`-zk`, `--zookeeperServers`|ZooKeeper connection string||
@@ -785,6 +823,45 @@ Options
 |`--cluster`|The cluster to test on||
 |`-h`, `--help`|Help message|false|
 
+### `transaction`
+
+Run a transaction. For more information, see [Pulsar transactions](txn-why).
+
+**Usage**
+
+```bash
+
+$ pulsar-perf transaction options
+
+```
+
+**Options**
+
+|Flag|Description|Default|
+|---|---|---|
+`-au`, `--admin-url`|Pulsar admin URL.|N/A
+`--conf-file`|Configuration file.|N/A
+`-h`, `--help`|Help messages.|N/A
+`-c`, `--max-connections`|Maximum number of TCP connections to a single broker.|100
+`-ioThreads`, `--num-io-threads`|Set the number of threads to be used for handling connections to brokers. |1
+`-ns`, `--num-subscriptions`|Number of subscriptions per topic.|1
+`-threads`, `--num-test-threads`|Number of test threads. <br /><br />This thread is for a new transaction to ack messages from consumer topics, produce messages to producer topics, and commit or abort this transaction. <br /><br /> Increasing the number of threads increases the parallelism of the performance test, consequently, it increases the intensity of the stress test.|1
+`-nmc`, `--numMessage-perTransaction-consume`|Set the number of messages consumed in a transaction. <br /><br /> If transaction is disabled, it means the number of messages consumed in a task instead of in a transaction.|1
+`-nmp`, `--numMessage-perTransaction-produce`|Set the number of messages produced in a transaction. <br /><br />If transaction is disabled, it means the number of messages produced in a task instead of in a transaction.|1
+`-ntxn`, `--number-txn`|Set the number of transactions. <br /><br /> 0 means the number of transactions is unlimited. <br /><br /> If transaction is disabled, it means the number of tasks instead of transactions. |0
+`-np`, `--partitions`|Create partitioned topics with a given number of partitions. <br /><br /> 0 means not trying to create a topic.
+`-q`, `--receiver-queue-size`|Size of the receiver queue.|1000
+`-u`, `--service-url`|Pulsar service URL.|N/A
+`-sp`, `--subscription-position`|Subscription position.|Earliest
+`-st`, `--subscription-type`|Subscription type.|Shared
+`-ss`, `--subscriptions`|A list of subscriptions to consume. <br /><br /> For example, sub1,sub2.|[sub]
+`-time`, `--test-duration`|Test duration (in second). <br /><br /> 0 means keeping publishing messages.|0
+`--topics-c`|All topics assigned to consumers.|[test-consume]
+`--topics-p`|All topics assigned to producers . |[test-produce]
+`--txn-disEnable`|Disable transaction.|true
+`-tto`, `--txn-timeout`|Set the time of transaction timeout (in second). <br /><br /> If you want `--txn-timeout` takes effect, set `--txn-enable` to true.|5
+`-abort`|Abort the transaction. <br /><br /> If you want `-abort` takes effect, set `--txn-disEnable` to false.|true
+`-txnRate`|Set the rate of opened transactions or tasks. <br /><br /> 0 means no limit.|0
 
 ### `help`
 This help message
@@ -809,7 +886,7 @@ $ bookkeeper command
 ```
 
 Commands
-* `auto-recovery`
+* `autorecovery`
 * `bookie`
 * `localbookie`
 * `upgrade`
@@ -829,16 +906,17 @@ The table below lists the environment variables that you can use to configure th
 |ENTRY_FORMATTER_CLASS|The Java class used to format entries||
 |BOOKIE_PID_DIR|Folder where the BookKeeper server PID file should be stored||
 |BOOKIE_STOP_TIMEOUT|Wait time before forcefully killing the Bookie server instance if attempts to stop it are not successful||
+|BOOKIE_GC_LOG|Gc options to be passed to the jvm||
 
 
-### `auto-recovery`
-Runs an auto-recovery service daemon
+### `autorecovery`
+Runs an auto-recovery service
 
 Usage
 
 ```bash
 
-$ bookkeeper auto-recovery options
+$ bookkeeper autorecovery options
 
 ```
 
@@ -846,7 +924,7 @@ Options
 
 |Flag|Description|Default|
 |---|---|---|
-|`-c`, `--conf`|Configuration for the auto-recovery daemon||
+|`-c`, `--conf`|Configuration for the auto-recovery||
 
 
 ### `bookie`
@@ -864,7 +942,7 @@ Options
 
 |Flag|Description|Default|
 |---|---|---|
-|`-c`, `--conf`|Configuration for the auto-recovery daemon||
+|`-c`, `--conf`|Configuration for the auto-recovery||
 |-readOnly|Force start a read-only bookie server|false|
 |-withAutoRecovery|Start auto-recovery service bookie server|false|
 
@@ -895,7 +973,7 @@ Options
 
 |Flag|Description|Default|
 |---|---|---|
-|`-c`, `--conf`|Configuration for the auto-recovery daemon||
+|`-c`, `--conf`|Configuration for the auto-recovery||
 |`-u`, `--upgrade`|Upgrade the bookie’s directories||
 
 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/reference-terminology.md b/site2/website-next/versioned_docs/version-2.4.1/reference-terminology.md
index d0e7368..ebc114d 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/reference-terminology.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/reference-terminology.md
@@ -2,7 +2,6 @@
 id: reference-terminology
 title: Pulsar Terminology
 sidebar_label: "Terminology"
-original_id: reference-terminology
 ---
 
 Here is a glossary of terms related to Apache Pulsar:
diff --git a/site2/website-next/versioned_docs/version-2.4.1/security-encryption.md b/site2/website-next/versioned_docs/version-2.4.1/security-encryption.md
index cc43082..90d0dbe 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/security-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/security-encryption.md
@@ -2,7 +2,6 @@
 id: security-encryption
 title: Pulsar Encryption
 sidebar_label: "End-to-End Encryption"
-original_id: security-encryption
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.1/security-extending.md b/site2/website-next/versioned_docs/version-2.4.1/security-extending.md
index c088e3a..dd0030e 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/security-extending.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/security-extending.md
@@ -2,7 +2,6 @@
 id: security-extending
 title: Extending Authentication and Authorization in Pulsar
 sidebar_label: "Extending"
-original_id: security-extending
 ---
 
 Pulsar provides a way to use custom authentication and authorization mechanisms.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/security-jwt.md b/site2/website-next/versioned_docs/version-2.4.1/security-jwt.md
index f12c5b6..8dd8d80 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/security-jwt.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/security-jwt.md
@@ -2,7 +2,6 @@
 id: security-jwt
 title: Client authentication using tokens based on JSON Web Tokens
 sidebar_label: "Authentication using JWT"
-original_id: security-jwt
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.1/security-token-admin.md b/site2/website-next/versioned_docs/version-2.4.1/security-token-admin.md
index 6736097..9424e83 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/security-token-admin.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/security-token-admin.md
@@ -2,7 +2,6 @@
 id: security-token-admin
 title: Token authentication admin
 sidebar_label: "Token authentication admin"
-original_id: security-token-admin
 ---
 
 ## Token Authentication Overview
diff --git a/site2/website-next/versioned_docs/version-2.4.1/sql-deployment-configurations.md b/site2/website-next/versioned_docs/version-2.4.1/sql-deployment-configurations.md
index 6c6fd87..9e7ff5a 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/sql-deployment-configurations.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/sql-deployment-configurations.md
@@ -2,7 +2,6 @@
 id: sql-deployment-configurations
 title: Pulsar SQL configuration and deployment
 sidebar_label: "Configuration and deployment"
-original_id: sql-deployment-configurations
 ---
 
 You can configure Presto Pulsar connector and deploy a cluster with the following instruction.
@@ -27,6 +26,84 @@ pulsar.entry-read-batch-size=100
 # default number of splits to use per query
 pulsar.target-num-splits=4
 
+# max size of one batch message (default value is 5MB)
+pulsar.max-message-size=5242880
+
+# number of split used when querying data from pulsar
+pulsar.target-num-splits=2
+
+# size of queue to buffer entry read from pulsar
+pulsar.max-split-entry-queue-size=1000
+
+# size of queue to buffer message extract from entries
+pulsar.max-split-message-queue-size=10000
+
+# status provider to record connector metrics
+pulsar.stats-provider=org.apache.bookkeeper.stats.NullStatsProvider
+
+# config in map format for stats provider e.g. {"key1":"val1","key2":"val2"}
+pulsar.stats-provider-configs={}
+
+# whether to rewrite Pulsar's default topic delimiter '/'
+pulsar.namespace-delimiter-rewrite-enable=false
+
+# delimiter used to rewrite Pulsar's default delimiter '/', use if default is causing incompatibility with other system like Superset
+pulsar.rewrite-namespace-delimiter="/"
+
+# maximum number of thread pool size for ledger offloader.
+pulsar.managed-ledger-offload-max-threads=2
+
+# driver used to offload or read cold data to or from long-term storage
+pulsar.managed-ledger-offload-driver=null
+
+# directory to load offloaders nar file.
+pulsar.offloaders-directory="./offloaders"
+
+# properties and configurations related to specific offloader implementation as map e.g. {"key1":"val1","key2":"val2"}
+pulsar.offloader-properties={}
+
+# authentication plugin used to authenticate to Pulsar cluster
+pulsar.auth-plugin=null
+
+# authentication parameter used to authenticate to the Pulsar cluster as a string e.g. "key1:val1,key2:val2".
+pulsar.auth-params=null
+
+# whether the Pulsar client accept an untrusted TLS certificate from broker
+pulsar.tls-allow-insecure-connection=null
+
+# whether to allow hostname verification when a client connects to broker over TLS.
+pulsar.tls-hostname-verification-enable=null
+
+# path for the trusted TLS certificate file of Pulsar broker
+pulsar.tls-trust-cert-file-path=null
+
+# set the threshold for BookKeeper request throttle, default is disabled
+pulsar.bookkeeper-throttle-value=0
+
+# set the number of IO thread
+pulsar.bookkeeper-num-io-threads=2 * Runtime.getRuntime().availableProcessors()
+
+# set the number of worker thread
+pulsar.bookkeeper-num-worker-threads=Runtime.getRuntime().availableProcessors()
+
+# whether to use BookKeeper V2 wire protocol
+pulsar.bookkeeper-use-v2-protocol=true
+
+# interval to check the need for sending an explicit LAC, default is disabled
+pulsar.bookkeeper-explicit-interval=0
+
+# size for managed ledger entry cache (in MB).
+pulsar.managed-ledger-cache-size-MB=0
+
+# number of threads to be used for managed ledger tasks dispatching
+pulsar.managed-ledger-num-worker-threads=Runtime.getRuntime().availableProcessors()
+
+# number of threads to be used for managed ledger scheduled tasks
+pulsar.managed-ledger-num-scheduler-threads=Runtime.getRuntime().availableProcessors()
+
+# directory used to store extraction NAR file
+pulsar.nar-extraction-directory=System.getProperty("java.io.tmpdir")
+
 ```
 
 You can connect Presto to a Pulsar cluster with multiple hosts. To configure multiple hosts for brokers, add multiple URLs to `pulsar.web-service-url`. To configure multiple hosts for ZooKeeper, add multiple URIs to `pulsar.zookeeper-uri`. The following is an example.
@@ -38,6 +115,21 @@ pulsar.zookeeper-uri=localhost1,localhost2:2181
 
 ```
 
+A frequently asked question is why the latest message is not showing up when querying with Pulsar SQL.
+It is not a bug but controlled by a setting: by default, the BookKeeper LAC only advances when subsequent entries are added.
+If no subsequent entries are added, the last entry written is not visible to readers until the ledger is closed.
+This is not a problem for Pulsar, which uses managed ledgers, but Pulsar SQL reads directly from the BookKeeper ledger.
+You can add the following settings to change this behavior:
+In the broker configuration, set
+bookkeeperExplicitLacIntervalInMills > 0
+bookkeeperUseV2WireProtocol=false
+
+And in the Presto configuration, set
+pulsar.bookkeeper-explicit-interval > 0
+pulsar.bookkeeper-use-v2-protocol=false
+
+However, keep in mind that using the BookKeeper V3 protocol introduces additional GC overhead to BookKeeper as it uses Protobuf.
+
 ## Query data from existing Presto clusters
 
 If you already have a Presto cluster, you can copy the Presto Pulsar connector plugin to your existing cluster. Download the archived plugin package with the following command.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/sql-getting-started.md b/site2/website-next/versioned_docs/version-2.4.1/sql-getting-started.md
index 8a5cd71..6a2d873 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/sql-getting-started.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/sql-getting-started.md
@@ -2,7 +2,6 @@
 id: sql-getting-started
 title: Query data with Pulsar SQL
 sidebar_label: "Query data"
-original_id: sql-getting-started
 ---
 
 Before querying data in Pulsar, you need to install Pulsar and built-in connectors. 
diff --git a/site2/website-next/versioned_docs/version-2.4.1/sql-overview.md b/site2/website-next/versioned_docs/version-2.4.1/sql-overview.md
index 4a4d5f0..2f827f4 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/sql-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/sql-overview.md
@@ -2,7 +2,6 @@
 id: sql-overview
 title: Pulsar SQL Overview
 sidebar_label: "Overview"
-original_id: sql-overview
 ---
 
 Apache Pulsar is used to store streams of event data, and the event data is structured with predefined fields. With the implementation of the [Schema Registry](schema-get-started), you can store structured data in Pulsar and query the data by using [Trino (formerly Presto SQL)](https://trino.io/).
diff --git a/site2/website-next/versioned_docs/version-2.4.1/standalone-docker.md b/site2/website-next/versioned_docs/version-2.4.1/standalone-docker.md
index 05ac2a1..7ee20c2 100644
--- a/site2/website-next/versioned_docs/version-2.4.1/standalone-docker.md
+++ b/site2/website-next/versioned_docs/version-2.4.1/standalone-docker.md
@@ -2,14 +2,11 @@
 id: standalone-docker
 title: Set up a standalone Pulsar in Docker
 sidebar_label: "Run Pulsar in Docker"
-original_id: standalone-docker
 ---
 
-For local development and testing, you can run Pulsar in standalone
-mode on your own machine within a Docker container.
+For local development and testing, you can run Pulsar in standalone mode on your own machine within a Docker container. 
 
-If you have not installed Docker, download the [Community edition](https://www.docker.com/community-edition)
-and follow the instructions for your OS.
+If you have not installed Docker, download the [Community edition](https://www.docker.com/community-edition) and follow the instructions for your OS.
 
 ## Start Pulsar in Docker
 
@@ -17,13 +14,7 @@ and follow the instructions for your OS.
 
   ```shell
   
-  $ docker run -it \
-  -p 6650:6650 \
-  -p 8080:8080 \
-  --mount source=pulsardata,target=/pulsar/data \
-  --mount source=pulsarconf,target=/pulsar/conf \
-  apachepulsar/pulsar:@pulsar:version@ \
-  bin/pulsar standalone
+  $ docker run -it -p 6650:6650  -p 8080:8080 --mount source=pulsardata,target=/pulsar/data --mount source=pulsarconf,target=/pulsar/conf apachepulsar/pulsar:@pulsar:version@ bin/pulsar standalone
   
   ```
 
@@ -36,8 +27,9 @@ If you start Pulsar successfully, you will see `INFO`-level log messages like th
 
 ```
 
-2017-08-09 22:34:04,030 - INFO  - [main:WebService@213] - Web Service started at http://127.0.0.1:8080
-2017-08-09 22:34:04,038 - INFO  - [main:PulsarService@335] - messaging service is ready, bootstrap service on port=8080, broker url=pulsar://127.0.0.1:6650, cluster=standalone, configs=org.apache.pulsar.broker.ServiceConfiguration@4db60246
+08:18:30.970 [main] INFO  org.apache.pulsar.broker.web.WebService - HTTP Service started at http://0.0.0.0:8080
+...
+07:53:37.322 [main] INFO  org.apache.pulsar.broker.PulsarService - messaging service is ready, bootstrap service port = 8080, broker url= pulsar://localhost:6650, cluster=standalone, configs=org.apache.pulsar.broker.ServiceConfiguration@98b63c1
 ...
 
 ```
@@ -60,7 +52,7 @@ use one of these root URLs to interact with your cluster:
 * `pulsar://localhost:6650`
 * `http://localhost:8080`
 
-The following example will guide you get started with Pulsar quickly by using the [Python](client-libraries-python)
+The following example will guide you to get started with Pulsar quickly by using the [Python client API](client-libraries-python)
 client API.
 
 Install the Pulsar Python client library directly from [PyPI](https://pypi.org/project/pulsar-client/):
@@ -128,51 +120,93 @@ The output is something like this:
 ```json
 
 {
-  "averageMsgSize": 0.0,
-  "msgRateIn": 0.0,
-  "msgRateOut": 0.0,
-  "msgThroughputIn": 0.0,
-  "msgThroughputOut": 0.0,
-  "publishers": [
-    {
-      "address": "/172.17.0.1:35048",
-      "averageMsgSize": 0.0,
-      "clientVersion": "1.19.0-incubating",
-      "connectedSince": "2017-08-09 20:59:34.621+0000",
-      "msgRateIn": 0.0,
-      "msgThroughputIn": 0.0,
-      "producerId": 0,
-      "producerName": "standalone-0-1"
-    }
-  ],
-  "replication": {},
-  "storageSize": 16,
-  "subscriptions": {
-    "my-sub": {
-      "blockedSubscriptionOnUnackedMsgs": false,
-      "consumers": [
+    "msgRateIn": 0.0,
+    "msgThroughputIn": 0.0,
+    "msgRateOut": 1.8332950480217471,
+    "msgThroughputOut": 91.33142602871978,
+    "bytesInCounter": 7097,
+    "msgInCounter": 143,
+    "bytesOutCounter": 6607,
+    "msgOutCounter": 133,
+    "averageMsgSize": 0.0,
+    "msgChunkPublished": false,
+    "storageSize": 7097,
+    "backlogSize": 0,
+    "offloadedStorageSize": 0,
+    "publishers": [
         {
-          "address": "/172.17.0.1:35064",
-          "availablePermits": 996,
-          "blockedConsumerOnUnackedMsgs": false,
-          "clientVersion": "1.19.0-incubating",
-          "connectedSince": "2017-08-09 21:05:39.222+0000",
-          "consumerName": "166111",
-          "msgRateOut": 0.0,
-          "msgRateRedeliver": 0.0,
-          "msgThroughputOut": 0.0,
-          "unackedMessages": 0
+            "accessMode": "Shared",
+            "msgRateIn": 0.0,
+            "msgThroughputIn": 0.0,
+            "averageMsgSize": 0.0,
+            "chunkedMessageRate": 0.0,
+            "producerId": 0,
+            "metadata": {},
+            "address": "/127.0.0.1:35604",
+            "connectedSince": "2021-07-04T09:05:43.04788Z",
+            "clientVersion": "2.8.0",
+            "producerName": "standalone-2-5"
+        }
+    ],
+    "waitingPublishers": 0,
+    "subscriptions": {
+        "my-sub": {
+            "msgRateOut": 1.8332950480217471,
+            "msgThroughputOut": 91.33142602871978,
+            "bytesOutCounter": 6607,
+            "msgOutCounter": 133,
+            "msgRateRedeliver": 0.0,
+            "chunkedMessageRate": 0,
+            "msgBacklog": 0,
+            "backlogSize": 0,
+            "msgBacklogNoDelayed": 0,
+            "blockedSubscriptionOnUnackedMsgs": false,
+            "msgDelayed": 0,
+            "unackedMessages": 0,
+            "type": "Exclusive",
+            "activeConsumerName": "3c544f1daa",
+            "msgRateExpired": 0.0,
+            "totalMsgExpired": 0,
+            "lastExpireTimestamp": 0,
+            "lastConsumedFlowTimestamp": 1625389101290,
+            "lastConsumedTimestamp": 1625389546070,
+            "lastAckedTimestamp": 1625389546162,
+            "lastMarkDeleteAdvancedTimestamp": 1625389546163,
+            "consumers": [
+                {
+                    "msgRateOut": 1.8332950480217471,
+                    "msgThroughputOut": 91.33142602871978,
+                    "bytesOutCounter": 6607,
+                    "msgOutCounter": 133,
+                    "msgRateRedeliver": 0.0,
+                    "chunkedMessageRate": 0.0,
+                    "consumerName": "3c544f1daa",
+                    "availablePermits": 867,
+                    "unackedMessages": 0,
+                    "avgMessagesPerEntry": 6,
+                    "blockedConsumerOnUnackedMsgs": false,
+                    "lastAckedTimestamp": 1625389546162,
+                    "lastConsumedTimestamp": 1625389546070,
+                    "metadata": {},
+                    "address": "/127.0.0.1:35472",
+                    "connectedSince": "2021-07-04T08:58:21.287682Z",
+                    "clientVersion": "2.8.0"
+                }
+            ],
+            "isDurable": true,
+            "isReplicated": false,
+            "allowOutOfOrderDelivery": false,
+            "consumersAfterMarkDeletePosition": {},
+            "nonContiguousDeletedMessagesRanges": 0,
+            "nonContiguousDeletedMessagesRangesSerializedSize": 0,
+            "durable": true,
+            "replicated": false
         }
-      ],
-      "msgBacklog": 0,
-      "msgRateExpired": 0.0,
-      "msgRateOut": 0.0,
-      "msgRateRedeliver": 0.0,
-      "msgThroughputOut": 0.0,
-      "type": "Exclusive",
-      "unackedMessages": 0
-    }
-  }
+    },
+    "replication": {},
+    "deduplicationStatus": "Disabled",
+    "nonContiguousDeletedMessagesRanges": 0,
+    "nonContiguousDeletedMessagesRangesSerializedSize": 0
 }
 
 ```
diff --git a/site2/website-next/versioned_docs/version-2.4.1/getting-started-standalone.md b/site2/website-next/versioned_docs/version-2.4.1/standalone.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.1/getting-started-standalone.md
rename to site2/website-next/versioned_docs/version-2.4.1/standalone.md
diff --git a/site2/website-next/versioned_docs/version-2.4.2/adaptors-kafka.md b/site2/website-next/versioned_docs/version-2.4.2/adaptors-kafka.md
index ad0d886..27382e9 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/adaptors-kafka.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/adaptors-kafka.md
@@ -2,7 +2,6 @@
 id: adaptors-kafka
 title: Pulsar adaptor for Apache Kafka
 sidebar_label: "Kafka client wrapper"
-original_id: adaptors-kafka
 ---
 
 
@@ -261,6 +260,7 @@ You can configure Pulsar authentication provider directly from the Kafka propert
 | [`pulsar.producer.batching.enabled`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBatchingEnabled-boolean-) | `true` | Control whether automatic batching of messages is enabled for the producer. |
 | [`pulsar.producer.batching.max.messages`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBatchingMaxMessages-int-) | `1000` | The maximum number of messages in a batch. |
 | [`pulsar.block.if.producer.queue.full`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setBlockIfQueueFull-boolean-) | | Specify the block producer if queue  is full. |
+| [`pulsar.crypto.reader.factory.class.name`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setCryptoKeyReader-org.apache.pulsar.client.api.CryptoKeyReader-) | | Specify the CryptoReader-Factory(`CryptoKeyReaderFactory`) classname which allows producer to create CryptoKeyReader. |
 
 
 ### Pulsar consumer Properties
@@ -272,3 +272,4 @@ You can configure Pulsar authentication provider directly from the Kafka propert
 | [`pulsar.consumer.acknowledgments.group.time.millis`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#acknowledgmentGroupTime-long-java.util.concurrent.TimeUnit-) | 100 | Set the maximum amount of group time for consumers to send the acknowledgments to the broker. |
 | [`pulsar.consumer.total.receiver.queue.size.across.partitions`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerConfiguration.html#setMaxTotalReceiverQueueSizeAcrossPartitions-int-) | 50000 | Set the maximum size of the total receiver queue across partitions. |
 | [`pulsar.consumer.subscription.topics.mode`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#subscriptionTopicsMode-Mode-) | PersistentOnly | Set the subscription topic mode for consumers. |
+| [`pulsar.crypto.reader.factory.class.name`](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ProducerConfiguration.html#setCryptoKeyReader-org.apache.pulsar.client.api.CryptoKeyReader-) | | Specify the CryptoReader-Factory(`CryptoKeyReaderFactory`) classname which allows consumer to create CryptoKeyReader. |
diff --git a/site2/website-next/versioned_docs/version-2.4.2/adaptors-spark.md b/site2/website-next/versioned_docs/version-2.4.2/adaptors-spark.md
index e14f13b..afa5a7e 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/adaptors-spark.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/adaptors-spark.md
@@ -2,7 +2,6 @@
 id: adaptors-spark
 title: Pulsar adaptor for Apache Spark
 sidebar_label: "Apache Spark"
-original_id: adaptors-spark
 ---
 
 ## Spark Streaming receiver
diff --git a/site2/website-next/versioned_docs/version-2.4.2/adaptors-storm.md b/site2/website-next/versioned_docs/version-2.4.2/adaptors-storm.md
index 76d5071..9df9076 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/adaptors-storm.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/adaptors-storm.md
@@ -2,7 +2,6 @@
 id: adaptors-storm
 title: Pulsar adaptor for Apache Storm
 sidebar_label: "Apache Storm"
-original_id: adaptors-storm
 ---
 
 Pulsar Storm is an adaptor for integrating with [Apache Storm](http://storm.apache.org/) topologies. It provides core Storm implementations for sending and receiving data.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/admin-api-brokers.md b/site2/website-next/versioned_docs/version-2.4.2/admin-api-brokers.md
index dbac453..10a90ca 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/admin-api-brokers.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/admin-api-brokers.md
@@ -2,7 +2,6 @@
 id: admin-api-brokers
 title: Managing Brokers
 sidebar_label: "Brokers"
-original_id: admin-api-brokers
 ---
 
 import Tabs from '@theme/Tabs';
@@ -26,9 +25,9 @@ Pulsar brokers consist of two components:
 
 [Brokers](reference-terminology.md#broker) can be managed via:
 
-* The [`brokers`](reference-pulsar-admin.md#brokers) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `brokers` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/brokers` endpoint of the admin {@inject: rest:REST:/} API
-* The `brokers` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin.html} object in the [Java API](client-libraries-java)
+* The `brokers` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 In addition to being configurable when you start them up, brokers can also be [dynamically configured](#dynamic-broker-configuration).
 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/admin-api-clusters.md b/site2/website-next/versioned_docs/version-2.4.2/admin-api-clusters.md
index 972c7e1..8687ae6 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/admin-api-clusters.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/admin-api-clusters.md
@@ -2,7 +2,6 @@
 id: admin-api-clusters
 title: Managing Clusters
 sidebar_label: "Clusters"
-original_id: admin-api-clusters
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -24,9 +23,9 @@ servers (aka [bookies](reference-terminology.md#bookie)), and a [ZooKeeper](http
 
 Clusters can be managed via:
 
-* The [`clusters`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `clusters` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/clusters` endpoint of the admin {@inject: rest:REST:/} API
-* The `clusters` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `clusters` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Clusters resources
 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/admin-api-namespaces.md b/site2/website-next/versioned_docs/version-2.4.2/admin-api-namespaces.md
index 216cb6f..c53fa3c 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/admin-api-namespaces.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/admin-api-namespaces.md
@@ -2,7 +2,6 @@
 id: admin-api-namespaces
 title: Managing Namespaces
 sidebar_label: "Namespaces"
-original_id: admin-api-namespaces
 ---
 
 import Tabs from '@theme/Tabs';
@@ -23,9 +22,9 @@ Pulsar [namespaces](reference-terminology.md#namespace) are logical groupings of
 
 Namespaces can be managed via:
 
-* The [`namespaces`](reference-pulsar-admin.md#clusters) command of the [`pulsar-admin`](reference-pulsar-admin) tool
+* The `namespaces` command of the [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/) tool
 * The `/admin/v2/namespaces` endpoint of the admin {@inject: rest:REST:/} API
-* The `namespaces` method of the {@inject: javadoc:PulsarAdmin:/admin/org/apache/pulsar/client/admin/PulsarAdmin} object in the [Java API](client-libraries-java)
+* The `namespaces` method of the `PulsarAdmin` object in the [Java API](client-libraries-java)
 
 ## Namespaces resources
 
@@ -49,8 +48,12 @@ $ pulsar-admin namespaces create test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|PUT|/admin/v2/namespaces/:tenant/:namespace|operation/createNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -105,8 +108,12 @@ $ pulsar-admin namespaces policies test-tenant/test-namespace
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace|operation/getPolicies?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -142,8 +149,12 @@ test-tenant/ns2
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant|operation/getTenantNamespaces?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -177,8 +188,12 @@ $ pulsar-admin namespaces delete test-tenant/ns1
 </TabItem>
 <TabItem value="REST API">
 
+```
+
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace|operation/deleteNamespace?version=@pulsar:version_number@}
 
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -196,7 +211,7 @@ admin.namespaces().deleteNamespace(namespace);
 
 #### Set replication cluster
 
-It sets replication clusters for a namespace, so Pulsar can internally replicate publish message from one colo to another colo.
+You can set replication clusters for a namespace to enable Pulsar to internally replicate the published messages from one colocation facility to another.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -234,7 +249,7 @@ admin.namespaces().setNamespaceReplicationClusters(namespace, clusters);
 
 #### Get replication cluster
 
-It gives a list of replication clusters for a given namespace.
+You can get the list of replication clusters for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -281,13 +296,13 @@ admin.namespaces().getNamespaceReplicationClusters(namespace)
 
 Backlog quota helps the broker to restrict bandwidth/storage of a namespace once it reaches a certain threshold limit. Admin can set the limit and take corresponding action after the limit is reached.
 
-  1.  producer_request_hold: broker will hold and not persist produce request payload
+  1.  producer_request_hold: broker holds and does not persist produce request payload
 
-  2.  producer_exception: broker disconnects with the client by giving an exception.
+  2.  producer_exception: broker disconnects with the client by giving an exception
 
-  3.  consumer_backlog_eviction: broker will start discarding backlog messages
+  3.  consumer_backlog_eviction: broker starts discarding backlog messages
 
-  Backlog quota restriction can be taken care by defining restriction of backlog-quota-type: destination_storage
+Backlog quota restriction can be taken care of by defining restriction of backlog-quota-type: destination_storage.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -300,12 +315,6 @@ $ pulsar-admin namespaces set-backlog-quota --limit 10G --limitTime 36000 --poli
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -330,7 +339,7 @@ admin.namespaces().setBacklogQuota(namespace, new BacklogQuota(limit, limitTime,
 
 #### Get backlog quota policies
 
-It shows a configured backlog quota for a given namespace.
+You can get a configured backlog quota for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -378,7 +387,7 @@ admin.namespaces().getBacklogQuotaMap(namespace);
 
 #### Remove backlog quota policies
 
-It removes backlog quota policies for a given namespace
+You can remove backlog quota policies for a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -391,12 +400,6 @@ $ pulsar-admin namespaces remove-backlog-quota test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -423,7 +426,7 @@ admin.namespaces().removeBacklogQuota(namespace, backlogQuotaType)
 
 #### Set persistence policies
 
-Persistence policies allow to configure persistency-level for all topic messages under a given namespace.
+Persistence policies allow users to configure the persistence level for all topic messages under a given namespace.
 
   -   Bookkeeper-ack-quorum: Number of acks (guaranteed copies) to wait for each entry, default: 0
 
@@ -444,12 +447,6 @@ $ pulsar-admin namespaces set-persistence --bookkeeper-ack-quorum 2 --bookkeeper
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -474,7 +471,7 @@ admin.namespaces().setPersistence(namespace,new PersistencePolicies(bookkeeperEn
 
 #### Get persistence policies
 
-It shows the configured persistence policies of a given namespace.
+You can get the configured persistence policies of a given namespace.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -537,12 +534,6 @@ $ pulsar-admin namespaces unload --bundle 0x00000000_0xffffffff test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -567,8 +558,7 @@ admin.namespaces().unloadNamespaceBundle(namespace, bundle)
 
 #### Split namespace bundles
 
-Each namespace bundle can contain multiple topics and each bundle can be served by only one broker. 
-If a single bundle is creating an excessive load on a broker, an admin splits the bundle using this command permitting one or more of the new bundles to be unloaded thus spreading the load across the brokers.
+One namespace bundle can contain multiple topics but can be served by only one broker. If a single bundle is creating an excessive load on a broker, an admin can split the bundle using the command below, permitting one or more of the new bundles to be unloaded, thus balancing the load across the brokers.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -581,12 +571,6 @@ $ pulsar-admin namespaces split-bundle --bundle 0x00000000_0xffffffff test-tenan
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -613,7 +597,7 @@ admin.namespaces().splitNamespaceBundle(namespace, bundle)
 
 #### Set message-ttl
 
-It configures message’s time to live (in seconds) duration.
+You can configure the time to live (in seconds) duration for messages. In the example below, the message-ttl is set to 100s.
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -626,12 +610,6 @@ $ pulsar-admin namespaces set-message-ttl --messageTTL 100 test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -656,7 +634,7 @@ admin.namespaces().setNamespaceMessageTTL(namespace, messageTTL)
 
 #### Get message-ttl
 
-It gives a message ttl of configured namespace.
+When the message-ttl for a namespace is set, you can use the command below to get the configured value. This example continues the example of the command `set message-ttl`, so the returned value is 100(s).
 
 <Tabs 
   defaultValue="pulsar-admin"
@@ -684,6 +662,12 @@ $ pulsar-admin namespaces get-message-ttl test-tenant/ns1
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 <TabItem value="Java">
 
@@ -693,6 +677,12 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+```
+
+100
+
+```
+
 </TabItem>
 
 </Tabs>
@@ -712,12 +702,6 @@ $ pulsar-admin namespaces remove-message-ttl test-tenant/ns1
 
 ```
 
-```
-
-100
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -758,12 +742,6 @@ $ pulsar-admin namespaces clear-backlog --sub my-subscription test-tenant/ns1
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -801,12 +779,6 @@ $ pulsar-admin namespaces clear-backlog  --bundle 0x00000000_0xffffffff  --sub m
 
 ```
 
-```
-
-N/A
-
-```
-
 </TabItem>
 <TabItem value="REST API">
 
@@ -842,13 +814,7 @@ Each namespace contains multiple topics and the retention size (storage size) of
 
 ```
 
-$ pulsar-admin set-retention --size 100 --time 10 test-tenant/ns1
-
-```
-
-```
-
-N/A
+$ pulsar-admin namespaces set-retention --size 100 --time 10 test-tenant/ns1
 
 ```
 
@@ -932,9 +898,7 @@ disables the throttling.
 :::note
 
 - If neither `clusterDispatchRate` nor `topicDispatchRate` is configured, dispatch throttling is disabled.
->
 - If `topicDispatchRate` is not configured, `clusterDispatchRate` takes effect.
-> 
 - If `topicDispatchRate` is configured, `topicDispatchRate` takes effect.
 
 :::
diff --git a/site2/website-next/versioned_docs/version-2.4.2/admin-api-permissions.md b/site2/website-next/versioned_docs/version-2.4.2/admin-api-permissions.md
index e2ca469..faedbf1 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/admin-api-permissions.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/admin-api-permissions.md
@@ -2,7 +2,6 @@
 id: admin-api-permissions
 title: Managing permissions
 sidebar_label: "Permissions"
-original_id: admin-api-permissions
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/admin-api-persistent-topics.md b/site2/website-next/versioned_docs/version-2.4.2/admin-api-persistent-topics.md
index b6d293b..8a7abae 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/admin-api-persistent-topics.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/admin-api-persistent-topics.md
@@ -2,7 +2,6 @@
 id: admin-api-persistent-topics
 title: Managing persistent topics
 sidebar_label: "Persistent topics"
-original_id: admin-api-persistent-topics
 ---
 
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.2/admin-api-schemas.md b/site2/website-next/versioned_docs/version-2.4.2/admin-api-schemas.md
index 9ffe21f..8399a03 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/admin-api-schemas.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/admin-api-schemas.md
@@ -2,6 +2,5 @@
 id: admin-api-schemas
 title: Managing Schemas
 sidebar_label: "Schemas"
-original_id: admin-api-schemas
 ---
 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/admin-api-tenants.md b/site2/website-next/versioned_docs/version-2.4.2/admin-api-tenants.md
index fe68336..570ac31 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/admin-api-tenants.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/admin-api-tenants.md
@@ -2,7 +2,6 @@
 id: admin-api-tenants
 title: Managing Tenants
 sidebar_label: "Tenants"
-original_id: admin-api-tenants
 ---
 
 import Tabs from '@theme/Tabs';
@@ -13,7 +12,7 @@ import TabItem from '@theme/TabItem';
 >
 > This page only shows **some frequently used operations**.
 >
-> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more information, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/).
+> - For the latest and complete information about `Pulsar admin`, including commands, flags, descriptions, and more, see [Pulsar admin doc](https://pulsar.apache.org/tools/pulsar-admin/)
 > 
 > - For the latest and complete information about `REST API`, including parameters, responses, samples, and more, see {@inject: rest:REST:/} API doc.
 > 
@@ -80,22 +79,26 @@ $ pulsar-admin tenants create my-tenant
 
 ```
 
-When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
+When creating a tenant, you can optionally assign admin roles using the `-r`/`--admin-roles`
+flag, and clusters using the `-c`/`--allowed-clusters` flag. You can specify multiple values
+as a comma-separated list. Here are some examples:
 
 ```shell
 
 $ pulsar-admin tenants create my-tenant \
-  --admin-roles role1,role2,role3
+  --admin-roles role1,role2,role3 \
+  --allowed-clusters cluster1
 
 $ pulsar-admin tenants create my-tenant \
   -r role1
+  -c cluster1
 
 ```
 
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
+{@inject: endpoint|PUT|/admin/v2/tenants/:tenant|operation/createTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -140,7 +143,7 @@ $ pulsar-admin tenants get my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|GET|/admin/v2/tenants/:cluster|operation/getTenant?version=@pulsar:version_number@}
+{@inject: endpoint|GET|/admin/v2/tenants/:tenant|operation/getTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -175,7 +178,7 @@ $ pulsar-admin tenants delete my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/deleteTenant?version=@pulsar:version_number@}
+{@inject: endpoint|DELETE|/admin/v2/tenants/:tenant|operation/deleteTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
@@ -210,7 +213,7 @@ $ pulsar-admin tenants update my-tenant
 </TabItem>
 <TabItem value="REST API">
 
-{@inject: endpoint|DELETE|/admin/v2/tenants/:cluster|operation/updateTenant?version=@pulsar:version_number@}
+{@inject: endpoint|POST|/admin/v2/tenants/:tenant|operation/updateTenant?version=@pulsar:version_number@}
 
 </TabItem>
 <TabItem value="JAVA">
diff --git a/site2/website-next/versioned_docs/version-2.4.2/administration-geo.md b/site2/website-next/versioned_docs/version-2.4.2/administration-geo.md
index 037be9c..3a85608 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/administration-geo.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/administration-geo.md
@@ -166,4 +166,4 @@ Consumer<String> consumer = client.newConsumer(Schema.STRING)
 ### Limitations
 
 * When you enable replicated subscription, you're creating a consistent distributed snapshot to establish an association between message ids from different clusters. The snapshots are taken periodically. The default value is `1 second`. It means that a consumer failing over to a different cluster can potentially receive 1 second of duplicates. You can also configure the frequency of the snapshot in the `broker.conf` file.
-* Only the base line cursor position is synced in replicated subscriptions while the individual acknowledgments are not synced. This means the messages acknowledged out-of-order could end up getting delivered again, in the case of a cluster failover.
+* Only the base line cursor position is synced in replicated subscriptions while the individual acknowledgments are not synced. This means the messages acknowledged out-of-order could end up getting delivered again, in the case of a cluster failover.
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.2/administration-load-balance.md b/site2/website-next/versioned_docs/version-2.4.2/administration-load-balance.md
index 3efba60..834b156 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/administration-load-balance.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/administration-load-balance.md
@@ -2,13 +2,11 @@
 id: administration-load-balance
 title: Pulsar load balance
 sidebar_label: "Load balance"
-original_id: administration-load-balance
 ---
 
 ## Load balance across Pulsar brokers
 
-Pulsar is an horizontally scalable messaging system, so the traffic
-in a logical cluster must be spread across all the available Pulsar brokers as evenly as possible, which is a core requirement.
+Pulsar is a horizontally scalable messaging system, so the traffic in a logical cluster must be balanced across all the available Pulsar brokers as evenly as possible, which is a core requirement.
 
 You can use multiple settings and tools to control the traffic distribution which require a bit of context to understand how the traffic is managed in Pulsar. Though, in most cases, the core requirement mentioned above is true out of the box and you should not worry about it. 
 
@@ -36,11 +34,9 @@ Instead of individual topic or partition assignment, each broker takes ownership
 
 The namespace is the "administrative" unit: many config knobs or operations are done at the namespace level.
 
-For assignment, a namespaces is sharded into a list of "bundles", with each bundle comprising
-a portion of overall hash range of the namespace.
+For assignment, a namespaces is sharded into a list of "bundles", with each bundle comprising a portion of overall hash range of the namespace.
 
-Topics are assigned to a particular bundle by taking the hash of the topic name and checking in which
-bundle the hash falls into.
+Topics are assigned to a particular bundle by taking the hash of the topic name and checking in which bundle the hash falls into.
 
 Each bundle is independent of the others and thus is independently assigned to different brokers.
 
@@ -72,8 +68,7 @@ On the same note, it is beneficial to start with more bundles than the number of
 
 ### Unload topics and bundles
 
-You can "unload" a topic in Pulsar with admin operation. Unloading means to close the topics,
-release ownership and reassign the topics to a new broker, based on current load.
+You can "unload" a topic in Pulsar with admin operation. Unloading means to close the topics, release ownership and reassign the topics to a new broker, based on current load.
 
 When unloading happens, the client experiences a small latency blip, typically in the order of tens of milliseconds, while the topic is reassigned.
 
@@ -97,9 +92,11 @@ pulsar-admin namespaces unload tenant/namespace
 
 ### Split namespace bundles 
 
-Since the load for the topics in a bundle might change over time, or predicting upfront might just be hard, brokers can split bundles into two. The new smaller bundles can be reassigned to different brokers.
+Since the load for the topics in a bundle might change over time and predicting the load might be hard, bundle split is designed to deal with these issues. The broker splits a bundle into two and the new smaller bundles can be reassigned to different brokers.
 
-The splitting happens based on some tunable thresholds. Any existing bundle that exceeds any of the threshold is a candidate to be split. By default the newly split bundles are also immediately offloaded to other brokers, to facilitate the traffic distribution.
+The splitting is based on some tunable thresholds. Any existing bundle that exceeds any of the threshold is a candidate to be split. By default the newly split bundles are also immediately offloaded to other brokers, to facilitate the traffic distribution. 
+
+You can split namespace bundles in two ways, by setting `supportedNamespaceBundleSplitAlgorithms` to `range_equally_divide` or `topic_count_equally_divide` in `broker.conf` file. The former splits the bundle into two parts with the same hash range size; the latter splits the bundle into two parts with the same number of topics. You can also configure other parameters for namespace bundles.
 
 ```properties
 
@@ -130,13 +127,11 @@ loadBalancerNamespaceMaximumBundles=128
 
 The support for automatic load shedding is available in the load manager of Pulsar. This means that whenever the system recognizes a particular broker is overloaded, the system forces some traffic to be reassigned to less loaded brokers.
 
-When a broker is identified as overloaded, the broker forces to "unload" a subset of the bundles, the
-ones with higher traffic, that make up for the overload percentage.
+When a broker is identified as overloaded, the broker forces to "unload" a subset of the bundles, the ones with higher traffic, that make up for the overload percentage.
 
 For example, the default threshold is 85% and if a broker is over quota at 95% CPU usage, then the broker unloads the percent difference plus a 5% margin: `(95% - 85%) + 5% = 15%`.
 
-Given the selection of bundles to offload is based on traffic (as a proxy measure for cpu, network
-and memory), broker unloads bundles for at least 15% of traffic.
+Given the selection of bundles to offload is based on traffic (as a proxy measure for cpu, network and memory), broker unloads bundles for at least 15% of traffic.
 
 The automatic load shedding is enabled by default and you can disable the automatic load shedding with this setting:
 
@@ -160,6 +155,20 @@ loadBalancerSheddingGracePeriodMinutes=30
 
 ```
 
+Pulsar supports three types of shedding strategies:
+
+##### ThresholdShedder
+This strategy tends to shed the bundles if any broker's usage is above the configured threshold. It does this by first computing the average resource usage per broker for the whole cluster. The resource usage for each broker is calculated using the following method: LocalBrokerData#getMaxResourceUsageWithWeight). The weights for each resource are configurable. Historical observations are included in the running average based on the broker's setting for loadBalancerHistoryResourcePercenta [...]
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.ThresholdShedder`
+
+##### OverloadShedder
+This strategy will attempt to shed exactly one bundle on brokers which are overloaded, that is, whose maximum system resource usage exceeds loadBalancerBrokerOverloadedThresholdPercentage. To see which resources are considered when determining the maximum system resource. A bundle is recommended for unloading off that broker if and only if the following conditions hold: The broker has at least two bundles assigned and the broker has at least one bundle that has not been unloaded recently [...]
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.OverloadShedder`
+
+##### UniformLoadShedder
+This strategy tends to distribute load uniformly across all brokers. This strategy checks the load difference between the broker with the highest load and the broker with the lowest load. If the difference is higher than the configured thresholds `loadBalancerMsgRateDifferenceShedderThreshold` and `loadBalancerMsgThroughputMultiplierDifferenceShedderThreshold`, then it finds out bundles which can be unloaded to distribute traffic evenly across all brokers. Configure the broker with the below value to use this strategy.
+`loadBalancerLoadSheddingStrategy=org.apache.pulsar.broker.loadbalance.impl.UniformLoadShedder`
+
 #### Broker overload thresholds
 
 The determinations of when a broker is overloaded is based on threshold of CPU, network and memory usage. Whenever either of those metrics reaches the threshold, the system triggers the shedding (if enabled).
@@ -175,9 +184,7 @@ loadBalancerBrokerOverloadedThresholdPercentage=85
 
 Pulsar gathers the usage stats from the system metrics.
 
-In case of network utilization, in some cases the network interface speed that Linux reports is
-not correct and needs to be manually overridden. This is the case in AWS EC2 instances with 1Gbps
-NIC speed for which the OS reports 10Gbps speed.
+In case of network utilization, in some cases the network interface speed that Linux reports is not correct and needs to be manually overridden. This is the case in AWS EC2 instances with 1Gbps NIC speed for which the OS reports 10Gbps speed.
 
 Because of the incorrect max speed, the Pulsar load manager might think the broker has not reached the NIC capacity, while in fact the broker already uses all the bandwidth and the traffic is slowed down.
 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/administration-proxy.md b/site2/website-next/versioned_docs/version-2.4.2/administration-proxy.md
index c046ed3..3cef937 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/administration-proxy.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/administration-proxy.md
@@ -2,10 +2,9 @@
 id: administration-proxy
 title: Pulsar proxy
 sidebar_label: "Pulsar proxy"
-original_id: administration-proxy
 ---
 
-Pulsar proxy is an optional gateway. Pulsar proxy is used when direction connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
+Pulsar proxy is an optional gateway. Pulsar proxy is used when direct connections between clients and Pulsar brokers are either infeasible or undesirable. For example, when you run Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, you can run Pulsar proxy.
 
 ## Configure the proxy
 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/administration-stats.md b/site2/website-next/versioned_docs/version-2.4.2/administration-stats.md
index ac0c036..2ccd73c 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/administration-stats.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/administration-stats.md
@@ -2,7 +2,6 @@
 id: administration-stats
 title: Pulsar stats
 sidebar_label: "Pulsar statistics"
-original_id: administration-stats
 ---
 
 ## Partitioned topics
diff --git a/site2/website-next/versioned_docs/version-2.4.2/administration-zk-bk.md b/site2/website-next/versioned_docs/version-2.4.2/administration-zk-bk.md
index de10d50..e5f9688 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/administration-zk-bk.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/administration-zk-bk.md
@@ -2,7 +2,6 @@
 id: administration-zk-bk
 title: ZooKeeper and BookKeeper administration
 sidebar_label: "ZooKeeper and BookKeeper"
-original_id: administration-zk-bk
 ---
 
 Pulsar relies on two external systems for essential tasks:
diff --git a/site2/website-next/versioned_docs/version-2.4.2/client-libraries-cpp.md b/site2/website-next/versioned_docs/version-2.4.2/client-libraries-cpp.md
index 333ec67..958861a 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/client-libraries-cpp.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/client-libraries-cpp.md
@@ -2,7 +2,6 @@
 id: client-libraries-cpp
 title: Pulsar C++ client
 sidebar_label: "C++"
-original_id: client-libraries-cpp
 ---
 
 You can use Pulsar C++ client to create Pulsar producers and consumers in C++.
@@ -11,7 +10,7 @@ All the methods in producer, consumer, and reader of a C++ client are thread-saf
 
 ## Supported platforms
 
-Pulsar C++ client is supported on **Linux** and **MacOS** platforms.
+Pulsar C++ client is supported on **Linux**, **MacOS**, and **Windows** platforms.
 
 [Doxygen](http://www.doxygen.nl/)-generated API docs for the C++ client are available [here](/api/cpp).
 
@@ -21,8 +20,8 @@ You need to install the following components before using the C++ client:
 
 * [CMake](https://cmake.org/)
 * [Boost](http://www.boost.org/)
-* [Protocol Buffers](https://developers.google.com/protocol-buffers/) 2.6
-* [libcurl](https://curl.haxx.se/libcurl/)
+* [Protocol Buffers](https://developers.google.com/protocol-buffers/) >= 3
+* [libcurl](https://curl.se/libcurl/)
 * [Google Test](https://github.com/google/googletest)
 
 ## Linux
@@ -147,6 +146,12 @@ $ rpm -ivh apache-pulsar-client*.rpm
 
 After you install RPM successfully, Pulsar libraries are in the `/usr/lib` directory.
 
+:::note
+
+If you get the error that `libpulsar.so: cannot open shared object file: No such file or directory` when starting Pulsar client, you may need to run `ldconfig` first.
+
+:::
+
 ### Install Debian
 
 1. Download a Debian package from the links in the table. 
@@ -236,10 +241,8 @@ $ export OPENSSL_INCLUDE_DIR=/usr/local/opt/openssl/include/
 $ export OPENSSL_ROOT_DIR=/usr/local/opt/openssl/
 
 # Protocol Buffers installation
-$ brew tap homebrew/versions
-$ brew install protobuf260
-$ brew install boost
-$ brew install log4cxx
+$ brew install protobuf boost boost-python log4cxx
+# If you are using python3, you need to install boost-python3 
 
 # Google Test installation
 $ git clone https://github.com/google/googletest.git
@@ -269,6 +272,50 @@ brew install libpulsar
 
 ```
 
+## Windows (64-bit)
+
+### Compilation
+
+1. Clone the Pulsar repository.
+
+```shell
+
+$ git clone https://github.com/apache/pulsar
+
+```
+
+2. Install all necessary dependencies.
+
+```shell
+
+cd ${PULSAR_HOME}/pulsar-client-cpp
+vcpkg install --feature-flags=manifests --triplet x64-windows
+
+```
+
+3. Build C++ libraries.
+
+```shell
+
+cmake -B ./build -A x64 -DBUILD_PYTHON_WRAPPER=OFF -DBUILD_TESTS=OFF -DVCPKG_TRIPLET=x64-windows -DCMAKE_BUILD_TYPE=Release -S .
+cmake --build ./build --config Release
+
+```
+
+> **NOTE**
+>
+> 1. For Windows 32-bit, you need to use `-A Win32` and `-DVCPKG_TRIPLET=x86-windows`.
+> 2. For MSVC Debug mode, you need to replace `Release` with `Debug` for both `CMAKE_BUILD_TYPE` variable and `--config` option.
+
+4. Client libraries are available in the following places.
+
+```
+
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.lib
+${PULSAR_HOME}/pulsar-client-cpp/build/lib/Release/pulsar.dll
+
+```
+
 ## Connection URLs
 
 To connect Pulsar using client libraries, you need to specify a Pulsar protocol URL.
@@ -299,109 +346,361 @@ pulsar+ssl://pulsar.us-west.example.com:6651
 
 ## Create a consumer
 
-To use Pulsar as a consumer, you need to create a consumer on the C++ client. The following is an example. 
+To use Pulsar as a consumer, you need to create a consumer on the C++ client. There are two main ways of using the consumer:
+- [Blocking style](#blocking-example): synchronously calling `receive(msg)`.
+- [Non-blocking](#consumer-with-a-message-listener) (event based) style: using a message listener.
+
+### Blocking example
+
+The benefit of this approach is that it is the simplest code. It simply keeps calling `receive(msg)`, which blocks until a message is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
 
-Consumer consumer;
-Result result = client.subscribe("my-topic", "my-subscription-name", consumer);
-if (result != ResultOk) {
-    LOG_ERROR("Failed to subscribe: " << result);
-    return -1;
+    Message msg;
+    int ctr = 0;
+    // consume 100 messages
+    while (ctr < 100) {
+        consumer.receive(msg);
+        std::cout << "Received: " << msg
+            << "  with payload '" << msg.getDataAsString() << "'" << std::endl;
+
+        consumer.acknowledge(msg);
+        ctr++;
+    }
+
+    std::cout << "Finished consuming synchronously!" << std::endl;
+
+    client.close();
+    return 0;
 }
 
-Message msg;
+```
+
+### Consumer with a message listener
+
+You can avoid running a loop with blocking calls by using an event-based style with a message listener, which is invoked for each message that is received.
+
+This example starts a subscription at the earliest offset and consumes 100 messages.
 
-while (true) {
-    consumer.receive(msg);
-    LOG_INFO("Received: " << msg
-            << "  with payload '" << msg.getDataAsString() << "'");
+```c++
+
+#include <pulsar/Client.h>
+#include <atomic>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> messagesReceived;
+
+void handleAckComplete(Result res) {
+    std::cout << "Ack res: " << res << std::endl;
+}
 
-    consumer.acknowledge(msg);
+void listener(Consumer consumer, const Message& msg) {
+    std::cout << "Got message " << msg << " with content '" << msg.getDataAsString() << "'" << std::endl;
+    messagesReceived++;
+    consumer.acknowledgeAsync(msg.getMessageId(), handleAckComplete);
 }
 
-client.close();
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Consumer consumer;
+    ConsumerConfiguration config;
+    config.setMessageListener(listener);
+    config.setSubscriptionInitialPosition(InitialPositionEarliest);
+    Result result = client.subscribe("persistent://public/default/my-topic", "consumer-1", config, consumer);
+    if (result != ResultOk) {
+        std::cout << "Failed to subscribe: " << result << std::endl;
+        return -1;
+    }
+
+    // wait for 100 messages to be consumed
+    while (messagesReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished consuming asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
 ## Create a producer
 
-To use Pulsar as a producer, you need to create a producer on the C++ client. The following is an example. 
+To use Pulsar as a producer, you need to create a producer on the C++ client. There are two main ways of using a producer:
+- [Blocking style](#simple-blocking-example): each call to `send` waits for an ack from the broker.
+- [Non-blocking asynchronous style](#non-blocking-example): `sendAsync` is called instead of `send` and a callback is supplied for when the ack is received from the broker.
+
+### Simple blocking example
+
+This example sends 100 messages using the blocking style. While simple, it does not produce high throughput as it waits for each ack to come back before sending the next message.
 
 ```c++
 
-Client client("pulsar://localhost:6650");
+#include <pulsar/Client.h>
+#include <thread>
 
-Producer producer;
-Result result = client.createProducer("my-topic", producer);
-if (result != ResultOk) {
-    LOG_ERROR("Error creating producer: " << result);
-    return -1;
-}
+using namespace pulsar;
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    Result result = client.createProducer("persistent://public/default/my-topic", producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages synchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        Result result = producer.send(msg);
+        if (result != ResultOk) {
+            std::cout << "The message " << content << " could not be sent, received code: " << result << std::endl;
+        } else {
+            std::cout << "The message " << content << " sent successfully" << std::endl;
+        }
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    std::cout << "Finished producing synchronously!" << std::endl;
 
-// Publish 10 messages to the topic
-for (int i = 0; i < 10; i++){
-    Message msg = MessageBuilder().setContent("my-message").build();
-    Result res = producer.send(msg);
-    LOG_INFO("Message sent: " << res);
+    client.close();
+    return 0;
 }
-client.close();
 
 ```
 
-## Enable authentication in connection URLs
-If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
+### Non-blocking example
 
-```cpp
+This example sends 100 messages using the non-blocking style calling `sendAsync` instead of `send`. This allows the producer to have multiple messages inflight at a time which increases throughput.
 
-ClientConfiguration config = ClientConfiguration();
-config.setUseTls(true);
-config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
-config.setTlsAllowInsecureConnection(false);
-config.setAuth(pulsar::AuthTls::create(
-            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+The producer configuration `blockIfQueueFull` is useful here to avoid `ResultProducerQueueIsFull` errors when the internal queue for outgoing send requests becomes full. Once the internal queue is full, `sendAsync` becomes blocking which can make your code simpler.
 
-Client client("pulsar+ssl://my-broker.com:6651", config);
+Without this configuration, the result code `ResultProducerQueueIsFull` is passed to the callback. You must decide how to deal with that (retry, discard etc).
+
+```c++
+
+#include <pulsar/Client.h>
+#include <thread>
+
+using namespace pulsar;
+
+std::atomic<uint32_t> acksReceived;
+
+void callback(Result code, const MessageId& msgId, std::string msgContent) {
+    // message processing logic here
+    std::cout << "Received ack for msg: " << msgContent << " with code: "
+        << code << " -- MsgID: " << msgId << std::endl;
+    acksReceived++;
+}
+
+int main() {
+    Client client("pulsar://localhost:6650");
+
+    ProducerConfiguration producerConf;
+    producerConf.setBlockIfQueueFull(true);
+    Producer producer;
+    Result result = client.createProducer("persistent://public/default/my-topic",
+                                          producerConf, producer);
+    if (result != ResultOk) {
+        std::cout << "Error creating producer: " << result << std::endl;
+        return -1;
+    }
+
+    // Send 100 messages asynchronously
+    int ctr = 0;
+    while (ctr < 100) {
+        std::string content = "msg" + std::to_string(ctr);
+        Message msg = MessageBuilder().setContent(content).setProperty("x", "1").build();
+        producer.sendAsync(msg, std::bind(callback,
+                                          std::placeholders::_1, std::placeholders::_2, content));
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        ctr++;
+    }
+
+    // wait for 100 messages to be acked
+    while (acksReceived < 100) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    std::cout << "Finished producing asynchronously!" << std::endl;
+
+    client.close();
+    return 0;
+}
 
 ```
 
-For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+### Partitioned topics and lazy producers
 
-## Schema
+When scaling out a Pulsar topic, you may configure a topic to have hundreds of partitions. Likewise, you may have also scaled out your producers so there are hundreds or even thousands of producers. This can put some strain on the Pulsar brokers as when you create a producer on a partitioned topic, internally it creates one internal producer per partition which involves communications to the brokers for each one. So for a topic with 1000 partitions and 1000 producers, it ends up creating [...]
 
-This section describes some examples about schema. For more information about schema, see [Pulsar schema](schema-get-started).
+You can reduce the load caused by this combination of a large number of partitions and many producers by doing the following:
+- use SinglePartition partition routing mode (this ensures that all messages are only sent to a single, randomly selected partition)
+- use non-keyed messages (when messages are keyed, routing is based on the hash of the key and so messages will end up being sent to multiple partitions)
+- use lazy producers (this ensures that an internal producer is only created on demand when a message needs to be routed to a partition)
 
-### Create producer with Avro schema
+With our example above, that reduces the number of internal producers spread out over the 1000 producer apps from 1,000,000 to just 1000.
 
-The following example shows how to create a producer with an Avro schema.
+Note that there can be extra latency for the first message sent. If you set a low send timeout, this timeout could be reached if the initial connection handshake is slow to complete.
 
-```cpp
+```c++
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-Producer producer;
 ProducerConfiguration producerConf;
-producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.createProducer("topic-avro", producerConf, producer);
+producerConf.setPartitionsRoutingMode(ProducerConfiguration::UseSinglePartition);
+producerConf.setLazyStartPartitionedProducers(true);
 
 ```
 
-### Create consumer with Avro schema
-
-The following example shows how to create a consumer with an Avro schema.
+## Enable authentication in connection URLs
+If you use TLS authentication when connecting to Pulsar, you need to add `ssl` in the connection URLs, and the default port is `6651`. The following is an example.
 
 ```cpp
 
-static const std::string exampleSchema =
-    "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
-    "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
-ConsumerConfiguration consumerConf;
-Consumer consumer;
-consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
-client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+ClientConfiguration config = ClientConfiguration();
+config.setUseTls(true);
+config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
+config.setTlsAllowInsecureConnection(false);
+config.setAuth(pulsar::AuthTls::create(
+            "/path/to/client-cert.pem", "/path/to/client-key.pem"););
+
+Client client("pulsar+ssl://my-broker.com:6651", config);
 
 ```
 
+For complete examples, refer to [C++ client examples](https://github.com/apache/pulsar/tree/master/pulsar-client-cpp/examples).
+
+## Schema
+
+This section describes some examples about schema. For more information about
+schema, see [Pulsar schema](schema-get-started).
+
+### Avro schema
+
+- The following example shows how to create a producer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  Producer producer;
+  ProducerConfiguration producerConf;
+  producerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.createProducer("topic-avro", producerConf, producer);
+  
+  ```
+
+- The following example shows how to create a consumer with an Avro schema.
+
+  ```cpp
+  
+  static const std::string exampleSchema =
+      "{\"type\":\"record\",\"name\":\"Example\",\"namespace\":\"test\","
+      "\"fields\":[{\"name\":\"a\",\"type\":\"int\"},{\"name\":\"b\",\"type\":\"int\"}]}";
+  ConsumerConfiguration consumerConf;
+  Consumer consumer;
+  consumerConf.setSchema(SchemaInfo(AVRO, "Avro", exampleSchema));
+  client.subscribe("topic-avro", "sub-2", consumerConf, consumer)
+  
+  ```
+
+### ProtobufNative schema
+
+The following example shows how to create a producer and a consumer with a ProtobufNative schema.
+​
+1. Generate the `User` class using Protobuf3. 
+
+   :::note
+
+   You need to use Protobuf3 or later versions.
+
+   :::
+
+​
+
+   ```protobuf
+   
+   syntax = "proto3";
+   
+   message User {
+       string name = 1;
+       int32 age = 2;
+   }
+   
+   ```
+
+​
+2. Include the `ProtobufNativeSchema.h` in your source code. Ensure the Protobuf dependency has been added to your project.
+​
+
+   ```c++
+   
+   #include <pulsar/ProtobufNativeSchema.h>
+   
+   ```
+
+​
+3. Create a producer to send a `User` instance.
+​
+
+   ```c++
+   
+   ProducerConfiguration producerConf;
+   producerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   Producer producer;
+   client.createProducer("topic-protobuf", producerConf, producer);
+   User user;
+   user.set_name("my-name");
+   user.set_age(10);
+   std::string content;
+   user.SerializeToString(&content);
+   producer.send(MessageBuilder().setContent(content).build());
+   
+   ```
+
+​
+4. Create a consumer to receive a `User` instance.
+​
+
+   ```c++
+   
+   ConsumerConfiguration consumerConf;
+   consumerConf.setSchema(createProtobufNativeSchema(User::GetDescriptor()));
+   consumerConf.setSubscriptionInitialPosition(InitialPositionEarliest);
+   Consumer consumer;
+   client.subscribe("topic-protobuf", "my-sub", consumerConf, consumer);
+   Message msg;
+   consumer.receive(msg);
+   User user2;
+   user2.ParseFromArray(msg.getData(), msg.getLength());
+   
+   ```
+
diff --git a/site2/website-next/versioned_docs/version-2.4.2/client-libraries-go.md b/site2/website-next/versioned_docs/version-2.4.2/client-libraries-go.md
index 7e797bb..22df463 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/client-libraries-go.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/client-libraries-go.md
@@ -2,7 +2,6 @@
 id: client-libraries-go
 title: Pulsar Go client
 sidebar_label: "Go"
-original_id: client-libraries-go
 ---
 
 > Tips: Currently, the CGo client will be deprecated, if you want to know more about the CGo client, please refer to [CGo client docs](client-libraries-cgo)
@@ -286,7 +285,8 @@ defer client.Close()
 
 topicName := newTopicName()
 producer, err := client.CreateProducer(pulsar.ProducerOptions{
-	Topic: topicName,
+    Topic:           topicName,
+    DisableBatching: true,
 })
 if err != nil {
 	log.Fatal(err)
@@ -330,6 +330,85 @@ canc()
 
 ```
 
+#### How to use Prometheus metrics in producer
+
+Pulsar Go client registers client metrics using Prometheus. This section demonstrates how to create a simple Pulsar producer application that exposes Prometheus metrics via HTTP.
+
+1. Write a simple producer application.
+
+```go
+
+// Create a Pulsar client
+client, err := pulsar.NewClient(pulsar.ClientOptions{
+	URL: "pulsar://localhost:6650",
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer client.Close()
+
+// Start a separate goroutine for Prometheus metrics
+// In this case, Prometheus metrics can be accessed via http://localhost:2112/metrics
+go func() {
+    prometheusPort := 2112
+    log.Printf("Starting Prometheus metrics at http://localhost:%v/metrics\n", prometheusPort)
+    http.Handle("/metrics", promhttp.Handler())
+    err = http.ListenAndServe(":"+strconv.Itoa(prometheusPort), nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+}()
+
+// Create a producer
+producer, err := client.CreateProducer(pulsar.ProducerOptions{
+    Topic: "topic-1",
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer producer.Close()
+
+ctx := context.Background()
+
+// Write your business logic here
+// In this case, you build a simple Web server. You can produce messages by requesting http://localhost:8082/produce
+webPort := 8082
+http.HandleFunc("/produce", func(w http.ResponseWriter, r *http.Request) {
+    msgId, err := producer.Send(ctx, &pulsar.ProducerMessage{
+        Payload: []byte(fmt.Sprintf("hello world")),
+    })
+    if err != nil {
+        log.Fatal(err)
+    } else {
+        log.Printf("Published message: %v", msgId)
+        fmt.Fprintf(w, "Published message: %v", msgId)
+    }
+})
+
+err = http.ListenAndServe(":"+strconv.Itoa(webPort), nil)
+if err != nil {
+    log.Fatal(err)
+}
+
+```
+
+2. To scrape metrics from applications, configure a local running Prometheus instance using a configuration file (`prometheus.yml`).
+
+```yaml
+
+scrape_configs:
+- job_name: pulsar-client-go-metrics
+  scrape_interval: 10s
+  static_configs:
+  - targets:
+  - localhost:2112
+
+```
+
+Now you can query Pulsar client metrics on Prometheus.
+
 ### Producer configuration
 
  Name | Description | Default
@@ -607,6 +686,85 @@ defer consumer.Close()
 
 ```
 
+#### How to use Prometheus metrics in consumer
+
+This section demonstrates how to create a simple Pulsar consumer application that exposes Prometheus metrics via HTTP.
+1. Write a simple consumer application.
+
+```go
+
+// Create a Pulsar client
+client, err := pulsar.NewClient(pulsar.ClientOptions{
+    URL: "pulsar://localhost:6650",
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer client.Close()
+
+// Start a separate goroutine for Prometheus metrics
+// In this case, Prometheus metrics can be accessed via http://localhost:2112/metrics
+go func() {
+    prometheusPort := 2112
+    log.Printf("Starting Prometheus metrics at http://localhost:%v/metrics\n", prometheusPort)
+    http.Handle("/metrics", promhttp.Handler())
+    err = http.ListenAndServe(":"+strconv.Itoa(prometheusPort), nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+}()
+
+// Create a consumer
+consumer, err := client.Subscribe(pulsar.ConsumerOptions{
+    Topic:            "topic-1",
+    SubscriptionName: "sub-1",
+    Type:             pulsar.Shared,
+})
+if err != nil {
+    log.Fatal(err)
+}
+
+defer consumer.Close()
+
+ctx := context.Background()
+
+// Write your business logic here
+// In this case, you build a simple Web server. You can consume messages by requesting http://localhost:8083/consume
+webPort := 8083
+http.HandleFunc("/consume", func(w http.ResponseWriter, r *http.Request) {
+    msg, err := consumer.Receive(ctx)
+    if err != nil {
+        log.Fatal(err)
+    } else {
+        log.Printf("Received message msgId: %v -- content: '%s'\n", msg.ID(), string(msg.Payload()))
+        fmt.Fprintf(w, "Received message msgId: %v -- content: '%s'\n", msg.ID(), string(msg.Payload()))
+        consumer.Ack(msg)
+    }
+})
+
+err = http.ListenAndServe(":"+strconv.Itoa(webPort), nil)
+if err != nil {
+    log.Fatal(err)
+}
+
+```
+
+2. To scrape metrics from applications, configure a local running Prometheus instance using a configuration file (`prometheus.yml`).
+
+```yaml
+
+scrape_configs:
+- job_name: pulsar-client-go-metrics
+  scrape_interval: 10s
+  static_configs:
+  - targets:
+  - localhost:2112
+
+```
+
+Now you can query Pulsar client metrics on Prometheus.
+
 ### Consumer configuration
 
  Name | Description | Default
diff --git a/site2/website-next/versioned_docs/version-2.4.2/client-libraries-websocket.md b/site2/website-next/versioned_docs/version-2.4.2/client-libraries-websocket.md
index bc13b43..c663f97 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/client-libraries-websocket.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/client-libraries-websocket.md
@@ -2,7 +2,6 @@
 id: client-libraries-websocket
 title: Pulsar WebSocket API
 sidebar_label: "WebSocket"
-original_id: client-libraries-websocket
 ---
 
 Pulsar [WebSocket](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API) API provides a simple way to interact with Pulsar using languages that do not have an official [client library](getting-started-clients). Through WebSocket, you can publish and consume messages and use features available on the [Client Features Matrix](https://github.com/apache/pulsar/wiki/Client-Features-Matrix) page.
@@ -190,7 +189,7 @@ Key | Type | Required? | Explanation
 `maxRedeliverCount` | int | no | Define a [maxRedeliverCount](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#deadLetterPolicy-org.apache.pulsar.client.api.DeadLetterPolicy-) for the consumer (default: 0). Activates [Dead Letter Topic](https://github.com/apache/pulsar/wiki/PIP-22%3A-Pulsar-Dead-Letter-Topic) feature.
 `deadLetterTopic` | string | no | Define a [deadLetterTopic](http://pulsar.apache.org/api/client/org/apache/pulsar/client/api/ConsumerBuilder.html#deadLetterPolicy-org.apache.pulsar.client.api.DeadLetterPolicy-) for the consumer (default: {topic}-{subscription}-DLQ). Activates [Dead Letter Topic](https://github.com/apache/pulsar/wiki/PIP-22%3A-Pulsar-Dead-Letter-Topic) feature.
 `pullMode` | boolean | no | Enable pull mode (default: false). See "Flow Control" below.
-`negativeAckRedeliveryDelay` | int | no | When a message is negatively acknowledged, it will be redelivered to the DLQ.
+`negativeAckRedeliveryDelay` | int | no | When a message is negatively acknowledged, the delay time before the message is redelivered (in milliseconds). The default value is 60000.
 `token` | string | no | Authentication token, this is used for the browser javascript client
 
 NB: these parameter (except `pullMode`) apply to the internal consumer of the WebSocket service.
@@ -204,23 +203,60 @@ Server will push messages on the WebSocket session:
 ```json
 
 {
-  "messageId": "CAAQAw==",
-  "payload": "SGVsbG8gV29ybGQ=",
-  "properties": {"key1": "value1", "key2": "value2"},
-  "publishTime": "2016-08-30 16:45:57.785",
-  "redeliveryCount": 4
+  "messageId": "CAMQADAA",
+  "payload": "hvXcJvHW7kOSrUn17P2q71RA5SdiXwZBqw==",
+  "properties": {},
+  "publishTime": "2021-10-29T16:01:38.967-07:00",
+  "redeliveryCount": 0,
+  "encryptionContext": {
+    "keys": {
+      "client-rsa.pem": {
+        "keyValue": "jEuwS+PeUzmCo7IfLNxqoj4h7txbLjCQjkwpaw5AWJfZ2xoIdMkOuWDkOsqgFmWwxiecakS6GOZHs94x3sxzKHQx9Oe1jpwBg2e7L4fd26pp+WmAiLm/ArZJo6JotTeFSvKO3u/yQtGTZojDDQxiqFOQ1ZbMdtMZA8DpSMuq+Zx7PqLo43UdW1+krjQfE5WD+y+qE3LJQfwyVDnXxoRtqWLpVsAROlN2LxaMbaftv5HckoejJoB4xpf/dPOUqhnRstwQHf6klKT5iNhjsY4usACt78uILT0pEPd14h8wEBidBz/vAlC/zVMEqiDVzgNS7dqEYS4iHbf7cnWVCn3Hxw==",
+        "metadata": {}
+      }
+    },
+    "param": "Tfu1PxVm6S9D3+Hk",
+    "compressionType": "NONE",
+    "uncompressedMessageSize": 0,
+    "batchSize": {
+      "empty": false,
+      "present": true
+    }
+  }
 }
 
 ```
 
-Key | Type | Required? | Explanation
-:---|:-----|:----------|:-----------
-`messageId` | string | yes | Message ID
-`payload` | string | yes | Base-64 encoded payload
-`publishTime` | string | yes | Publish timestamp
-`redeliveryCount` | number | yes | Number of times this message was already delivered
-`properties` | key-value pairs | no | Application-defined properties
-`key` | string | no |  Original routing key set by producer
+Below are the parameters in the WebSocket consumer response.
+
+- General parameters
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `messageId` | string | yes | Message ID
+  `payload` | string | yes | Base-64 encoded payload
+  `publishTime` | string | yes | Publish timestamp
+  `redeliveryCount` | number | yes | Number of times this message was already delivered
+  `properties` | key-value pairs | no | Application-defined properties
+  `key` | string | no |  Original routing key set by producer
+  `encryptionContext` | EncryptionContext | no | Encryption context that consumers can use to decrypt received messages
+  `param` | string | no | Initialization vector for cipher (Base64 encoding)
+  `batchSize` | string | no | Number of entries in a message (if it is a batch message)
+  `uncompressedMessageSize` | string | no | Message size before compression
+  `compressionType` | string | no | Algorithm used to compress the message payload
+
+- `encryptionContext` related parameter
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `keys` |key-EncryptionKey pairs | yes | Key in `key-EncryptionKey` pairs is an encryption key name. Value in `key-EncryptionKey` pairs is an encryption key object.
+
+- `encryptionKey` related parameters
+
+  Key | Type | Required? | Explanation
+  :---|:-----|:----------|:-----------
+  `keyValue` | string | yes | Encryption key (Base64 encoding)
+  `metadata` | key-value pairs | no | Application-defined metadata
 
 #### Acknowledging the message
 
@@ -454,9 +490,15 @@ TOPIC = scheme + '://localhost:8080/ws/v2/producer/persistent/public/default/my-
 
 ws = websocket.create_connection(TOPIC)
 
+# encode message
+s = "Hello World"
+firstEncoded = s.encode("UTF-8")
+binaryEncoded = base64.b64encode(firstEncoded)
+payloadString = binaryEncoded.decode('UTF-8')
+
 # Send one message as JSON
 ws.send(json.dumps({
-    'payload' : base64.b64encode('Hello World'),
+    'payload' : payloadString,
     'properties': {
         'key1' : 'value1',
         'key2' : 'value2'
@@ -466,9 +508,9 @@ ws.send(json.dumps({
 
 response =  json.loads(ws.recv())
 if response['result'] == 'ok':
-    print 'Message published successfully'
+    print('Message published successfully')
 else:
-    print 'Failed to publish message:', response
+    print('Failed to publish message:', response)
 ws.close()
 
 ```
@@ -495,7 +537,7 @@ while True:
     msg = json.loads(ws.recv())
     if not msg: break
 
-    print "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload']))
+    print( "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload'])))
 
     # Acknowledge successful processing
     ws.send(json.dumps({'messageId' : msg['messageId']}))
@@ -525,7 +567,7 @@ while True:
     msg = json.loads(ws.recv())
     if not msg: break
 
-    print "Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload']))
+    print("Received: {} - payload: {}".format(msg, base64.b64decode(msg['payload'])))
 
     # Acknowledge successful processing
     ws.send(json.dumps({'messageId' : msg['messageId']}))
diff --git a/site2/website-next/versioned_docs/version-2.4.2/client-libraries.md b/site2/website-next/versioned_docs/version-2.4.2/client-libraries.md
index 23e5a06..ab5b7c4 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/client-libraries.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/client-libraries.md
@@ -2,7 +2,6 @@
 id: client-libraries
 title: Pulsar client libraries
 sidebar_label: "Overview"
-original_id: client-libraries
 ---
 
 Pulsar supports the following client libraries:
@@ -16,7 +15,7 @@ Pulsar supports the following client libraries:
 - [C# client](client-libraries-dotnet)
 
 ## Feature matrix
-Pulsar client feature matrix for different languages is listed on [Client Features Matrix](https://github.com/apache/pulsar/wiki/Client-Features-Matrix) page.
+Pulsar client feature matrix for different languages is listed on [Pulsar Feature Matrix (Client and Function)](https://github.com/apache/pulsar/wiki/PIP-108%3A-Pulsar-Feature-Matrix-%28Client-and-Function%29) page.
 
 ## Third-party clients
 
@@ -33,3 +32,4 @@ Besides the official released clients, multiple projects on developing Pulsar cl
 | Scala | [pulsar4s](https://github.com/sksamuel/pulsar4s) | [sksamuel](https://github.com/sksamuel) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Idiomatic, typesafe, and reactive Scala client for Apache Pulsar |
 | Rust | [pulsar-rs](https://github.com/wyyerd/pulsar-rs) | [Wyyerd Group](https://github.com/wyyerd) | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) | Future-based Rust bindings for Apache Pulsar |
 | .NET | [pulsar-client-dotnet](https://github.com/fsharplang-ru/pulsar-client-dotnet) | [Lanayx](https://github.com/Lanayx) | [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native .NET client for C#/F#/VB |
+| Node.js | [pulsar-flex](https://github.com/ayeo-flex-org/pulsar-flex) | [Daniel Sinai](https://github.com/danielsinai), [Ron Farkash](https://github.com/ronfarkash), [Gal Rosenberg](https://github.com/galrose)| [![GitHub](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT) | Native Nodejs client |
diff --git a/site2/website-next/versioned_docs/version-2.4.2/concepts-architecture-overview.md b/site2/website-next/versioned_docs/version-2.4.2/concepts-architecture-overview.md
index 6a501d2..8fe0717 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/concepts-architecture-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/concepts-architecture-overview.md
@@ -2,7 +2,6 @@
 id: concepts-architecture-overview
 title: Architecture Overview
 sidebar_label: "Architecture"
-original_id: concepts-architecture-overview
 ---
 
 At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication) data amongst themselves.
@@ -146,7 +145,7 @@ Some important things to know about the Pulsar proxy:
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL.
 
 You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/concepts-authentication.md b/site2/website-next/versioned_docs/version-2.4.2/concepts-authentication.md
index b375ecb..335da8d 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/concepts-authentication.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/concepts-authentication.md
@@ -2,7 +2,6 @@
 id: concepts-authentication
 title: Authentication and Authorization
 sidebar_label: "Authentication and Authorization"
-original_id: concepts-authentication
 ---
 
 Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/concepts-clients.md b/site2/website-next/versioned_docs/version-2.4.2/concepts-clients.md
index b68f76a..65201b5 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/concepts-clients.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/concepts-clients.md
@@ -2,7 +2,6 @@
 id: concepts-clients
 title: Pulsar Clients
 sidebar_label: "Clients"
-original_id: concepts-clients
 ---
 
 Pulsar exposes a client API with language bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md), [C++](client-libraries-cpp.md) and [C#](client-libraries-dotnet). The client API optimizes and encapsulates Pulsar's client-broker communication protocol and exposes a simple and intuitive API for use by applications.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/concepts-multi-tenancy.md b/site2/website-next/versioned_docs/version-2.4.2/concepts-multi-tenancy.md
index be752cc..8a17e72 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/concepts-multi-tenancy.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/concepts-multi-tenancy.md
@@ -2,7 +2,6 @@
 id: concepts-multi-tenancy
 title: Multi Tenancy
 sidebar_label: "Multi Tenancy"
-original_id: concepts-multi-tenancy
 ---
 
 Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/concepts-overview.md b/site2/website-next/versioned_docs/version-2.4.2/concepts-overview.md
index b903fa4..c76032c 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/concepts-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/concepts-overview.md
@@ -2,7 +2,6 @@
 id: concepts-overview
 title: Pulsar Overview
 sidebar_label: "Overview"
-original_id: concepts-overview
 ---
 
 Pulsar is a multi-tenant, high-performance solution for server-to-server messaging. Pulsar was originally developed by Yahoo, it is under the stewardship of the [Apache Software Foundation](https://www.apache.org/).
diff --git a/site2/website-next/versioned_docs/version-2.4.2/concepts-replication.md b/site2/website-next/versioned_docs/version-2.4.2/concepts-replication.md
index 6e23962..11677cc 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/concepts-replication.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/concepts-replication.md
@@ -2,7 +2,6 @@
 id: concepts-replication
 title: Geo Replication
 sidebar_label: "Geo Replication"
-original_id: concepts-replication
 ---
 
 Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo) in Pulsar enables you to do that.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/concepts-topic-compaction.md b/site2/website-next/versioned_docs/version-2.4.2/concepts-topic-compaction.md
index c85e703..3356298 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/concepts-topic-compaction.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/concepts-topic-compaction.md
@@ -2,7 +2,6 @@
 id: concepts-topic-compaction
 title: Topic Compaction
 sidebar_label: "Topic Compaction"
-original_id: concepts-topic-compaction
 ---
 
 Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-bookkeepermetadata.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-bookkeepermetadata.md
index b0fa98d..187cb65 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-bookkeepermetadata.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-bookkeepermetadata.md
@@ -1,7 +1,6 @@
 ---
 id: cookbooks-bookkeepermetadata
 title: BookKeeper Ledger Metadata
-original_id: cookbooks-bookkeepermetadata
 ---
 
 Pulsar stores data on BookKeeper ledgers, you can understand the contents of a ledger by inspecting the metadata attached to the ledger.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-deduplication.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-deduplication.md
index 1669afa..307fe03 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-deduplication.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-deduplication.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-deduplication
 title: Message deduplication
-sidebar_label: "Message deduplication"
-original_id: cookbooks-deduplication
+sidebar_label: "Message deduplication "
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-encryption.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-encryption.md
index f0d8fb8..fbd1c97 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-encryption.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-encryption.md
@@ -1,8 +1,7 @@
 ---
 id: cookbooks-encryption
 title: Pulsar Encryption
-sidebar_label: "Encryption"
-original_id: cookbooks-encryption
+sidebar_label: "Encryption "
 ---
 
 Pulsar encryption allows applications to encrypt messages at the producer and decrypt at the consumer. Encryption is performed using the public/private key pair configured by the application. Encrypted messages can only be decrypted by consumers with a valid key.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-message-queue.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-message-queue.md
index eb43cbd..9b93a94 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-message-queue.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-message-queue.md
@@ -2,7 +2,6 @@
 id: cookbooks-message-queue
 title: Using Pulsar as a message queue
 sidebar_label: "Message queue"
-original_id: cookbooks-message-queue
 ---
 
 Message queues are essential components of many large-scale data architectures. If every single work object that passes through your system absolutely *must* be processed in spite of the slowness or downright failure of this or that system component, there's a good chance that you'll need a message queue to step in and ensure that unprocessed data is retained---with correct ordering---until the required actions are taken.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-non-persistent.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-non-persistent.md
index 391569a..d40c4fb 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-non-persistent.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-non-persistent.md
@@ -2,7 +2,6 @@
 id: cookbooks-non-persistent
 title: Non-persistent messaging
 sidebar_label: "Non-persistent messaging"
-original_id: cookbooks-non-persistent
 ---
 
 **Non-persistent topics** are Pulsar topics in which message data is *never* [persistently stored](concepts-architecture-overview.md#persistent-storage) and kept only in memory. This cookbook provides:
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-partitioned.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-partitioned.md
index 7882fb9..2589693 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-partitioned.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-partitioned.md
@@ -2,6 +2,5 @@
 id: cookbooks-partitioned
 title: Partitioned topics
 sidebar_label: "Partitioned Topics"
-original_id: cookbooks-partitioned
 ---
 For details of the content, refer to [manage topics](admin-api-topics).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-retention-expiry.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-retention-expiry.md
index b9353b5..738cf42 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-retention-expiry.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-retention-expiry.md
@@ -2,7 +2,6 @@
 id: cookbooks-retention-expiry
 title: Message retention and expiry
 sidebar_label: "Message retention and expiry"
-original_id: cookbooks-retention-expiry
 ---
 
 import Tabs from '@theme/Tabs';
@@ -36,7 +35,7 @@ By default, when a Pulsar message arrives at a broker, the message is stored unt
 
 Retention policies are useful when you use the Reader interface. The Reader interface does not use acknowledgements, and messages do not exist within backlogs. It is required to configure retention for Reader-only use cases.
 
-When you set a retention policy on topics in a namespace, you must set **both** a *size limit* and a *time limit*. You can refer to the following table to set retention policies in `pulsar-admin` and Java.
+When you set a retention policy on topics in a namespace, you must set **both** a *size limit* (via `defaultRetentionSizeInMB`) and a *time limit* (via `defaultRetentionTimeInMinutes`). You can refer to the following table to set retention policies in `pulsar-admin` and Java.
 
 |Time limit|Size limit| Message retention      |
 |----------|----------|------------------------|
@@ -152,7 +151,10 @@ admin.namespaces().setRetention(namespace, policies);
 
 You can fetch the retention policy for a namespace by specifying the namespace. The output will be a JSON object with two keys: `retentionTimeInMinutes` and `retentionSizeInMB`.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-retention`](reference-pulsar-admin.md#namespaces) subcommand and specify the namespace.
 
@@ -168,11 +170,13 @@ $ pulsar-admin namespaces get-retention my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/retention|operation/getRetention?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -180,15 +184,17 @@ admin.namespaces().getRetention(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Backlog quotas
 
 *Backlogs* are sets of unacknowledged messages for a topic that have been stored by bookies. Pulsar stores all unacknowledged messages in backlogs until they are processed and acknowledged.
 
-You can control the allowable size of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
+You can control the allowable size and/or time of backlogs, at the namespace level, using *backlog quotas*. Setting a backlog quota involves setting:
 
-TODO: Expand on is this per backlog or per topic?
-
-* an allowable *size threshold* for each topic in the namespace
+* an allowable *size and/or time threshold* for each topic in the namespace
 * a *retention policy* that determines which action the [broker](reference-terminology.md#broker) takes if the threshold is exceeded.
 
 The following retention policies are available:
@@ -210,9 +216,12 @@ Backlog quotas are handled at the namespace level. They can be managed via:
 
 You can set a size and/or time threshold and backlog retention policy for all of the topics in a [namespace](reference-terminology.md#namespace) by specifying the namespace, a size limit and/or a time limit in second, and a policy by name.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` flag, and a retention policy using the `-p`/`--policy` flag.
+Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand and specify a namespace, a size limit using the `-l`/`--limit` , `-lt`/`--limitTime` flag to limit backlog, a retention policy using the `-p`/`--policy` flag and a policy type using `-t`/`--type` (default is destination_storage).
 
 ##### Example
 
@@ -220,16 +229,26 @@ Use the [`set-backlog-quota`](reference-pulsar-admin.md#namespaces) subcommand a
 
 $ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns \
   --limit 2G \
-  --limitTime 36000 \
   --policy producer_request_hold
 
 ```
 
-#### REST API
+```shell
+
+$ pulsar-admin namespaces set-backlog-quota my-tenant/my-ns/my-topic \
+--limitTime 3600 \
+--policy producer_request_hold \
+--type message_age
+
+```
+
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -240,11 +259,18 @@ admin.namespaces().setBacklogQuota(namespace, quota);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get backlog threshold and backlog retention policy
 
 You can see which size threshold and backlog retention policy has been applied to a namespace.
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-backlog-quotas`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-backlog-quotas) subcommand and specify a namespace. Here's an example:
 
@@ -260,11 +286,13 @@ $ pulsar-admin namespaces get-backlog-quotas my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/backlogQuotaMap|operation/getBacklogQuotaMap?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -273,11 +301,18 @@ Map<BacklogQuota.BacklogQuotaType,BacklogQuota> quotas =
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove backlog quotas
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
-Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace. Here's an example:
+Use the [`remove-backlog-quota`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-backlog-quota) subcommand and specify a namespace, use `-t`/`--type` to specify backlog type to remove (default is destination_storage). Here's an example:
 
 ```shell
 
@@ -285,11 +320,13 @@ $ pulsar-admin namespaces remove-backlog-quota my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/backlogQuota|operation/removeBacklogQuota?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -297,6 +334,10 @@ admin.namespaces().removeBacklogQuota(namespace);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Clear backlog
 
 #### pulsar-admin
@@ -319,7 +360,10 @@ By default, Pulsar stores all unacknowledged messages forever. This can lead to
 
 ### Set the TTL for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`set-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-set-message-ttl) subcommand and specify a namespace and a TTL (in seconds) using the `-ttl`/`--messageTTL` flag.
 
@@ -332,11 +376,13 @@ $ pulsar-admin namespaces set-message-ttl my-tenant/my-ns \
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|POST|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/setNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -344,9 +390,16 @@ admin.namespaces().setNamespaceMessageTTL(namespace, ttlInSeconds);
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Get the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`get-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-get-message-ttl) subcommand and specify a namespace.
 
@@ -359,11 +412,13 @@ $ pulsar-admin namespaces get-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|GET|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/getNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -371,9 +426,16 @@ admin.namespaces().getNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ### Remove the TTL configuration for a namespace
 
-#### pulsar-admin
+<Tabs 
+  defaultValue="pulsar-admin"
+  values={[{"label":"pulsar-admin","value":"pulsar-admin"},{"label":"REST API","value":"REST API"},{"label":"Java","value":"Java"}]}>
+<TabItem value="pulsar-admin">
 
 Use the [`remove-message-ttl`](reference-pulsar-admin.md#pulsar-admin-namespaces-remove-message-ttl) subcommand and specify a namespace.
 
@@ -385,11 +447,13 @@ $ pulsar-admin namespaces remove-message-ttl my-tenant/my-ns
 
 ```
 
-#### REST API
+</TabItem>
+<TabItem value="REST API">
 
 {@inject: endpoint|DELETE|/admin/v2/namespaces/:tenant/:namespace/messageTTL|operation/removeNamespaceMessageTTL?version=@pulsar:version_number@}
 
-#### Java
+</TabItem>
+<TabItem value="Java">
 
 ```java
 
@@ -397,6 +461,10 @@ admin.namespaces().removeNamespaceMessageTTL(namespace)
 
 ```
 
+</TabItem>
+
+</Tabs>
+
 ## Delete messages from namespaces
 
 If you do not have any retention period and that you never have much of a backlog, the upper limit for retaining messages, which are acknowledged, equals to the Pulsar segment rollover period + entry log rollover period + (garbage collection interval * garbage collection ratios).
diff --git a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-tiered-storage.md b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-tiered-storage.md
index 8f6a7fb..f2ea50d 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/cookbooks-tiered-storage.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/cookbooks-tiered-storage.md
@@ -2,14 +2,15 @@
 id: cookbooks-tiered-storage
 title: Tiered Storage
 sidebar_label: "Tiered Storage"
-original_id: cookbooks-tiered-storage
 ---
 
 Pulsar's **Tiered Storage** feature allows older backlog data to be offloaded to long term storage, thereby freeing up space in BookKeeper and reducing storage costs. This cookbook walks you through using tiered storage in your Pulsar cluster.
 
-* Tiered storage uses [Apache jclouds](https://jclouds.apache.org) to support [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/)(GCS for short) for long term storage. With Jclouds, it is easy to add support for more [cloud storage providers](https://jclouds.apache.org/reference/providers/#blobstore-providers) in the future.
+* Tiered storage uses [Apache jclouds](https://jclouds.apache.org) to support [Amazon S3](https://aws.amazon.com/s3/) and [Google Cloud Storage](https://cloud.google.com/storage/)(GCS for short)
+for long term storage. With Jclouds, it is easy to add support for more [cloud storage providers](https://jclouds.apache.org/reference/providers/#blobstore-providers) in the future.
 
-* Tiered storage uses [Apache Hadoop](http://hadoop.apache.org/) to support filesystem for long term storage. With Hadoop, it is easy to add support for more filesystem in the future.
+* Tiered storage uses [Apache Hadoop](http://hadoop.apache.org/) to support filesystem for long term storage. 
+With Hadoop, it is easy to add support for more filesystem in the future.
 
 ## When should I use Tiered Storage?
 
diff --git a/site2/website-next/versioned_docs/version-2.4.2/deploy-aws.md b/site2/website-next/versioned_docs/version-2.4.2/deploy-aws.md
index 6323051..2034749 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/deploy-aws.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/deploy-aws.md
@@ -2,7 +2,6 @@
 id: deploy-aws
 title: Deploying a Pulsar cluster on AWS using Terraform and Ansible
 sidebar_label: "Amazon Web Services"
-original_id: deploy-aws
 ---
 
 > For instructions on deploying a single Pulsar cluster manually rather than using Terraform and Ansible, see [Deploying a Pulsar cluster on bare metal](deploy-bare-metal.md). For instructions on manually deploying a multi-cluster Pulsar instance, see [Deploying a Pulsar instance on bare metal](deploy-bare-metal-multi-cluster).
@@ -148,7 +147,7 @@ Variable name | Description | Default
 When you run the Ansible playbook, the following AWS resources are used:
 
 * 9 total [Elastic Compute Cloud](https://aws.amazon.com/ec2) (EC2) instances running the [ami-9fa343e7](https://access.redhat.com/articles/3135091) Amazon Machine Image (AMI), which runs [Red Hat Enterprise Linux (RHEL) 7.4](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/7.4_release_notes/index). By default, that includes:
-  * 3 small VMs for ZooKeeper ([t2.small](https://www.ec2instances.info/?selected=t2.small) instances)
+  * 3 small VMs for ZooKeeper ([t3.small](https://www.ec2instances.info/?selected=t3.small) instances)
   * 3 larger VMs for BookKeeper [bookies](reference-terminology.md#bookie) ([i3.xlarge](https://www.ec2instances.info/?selected=i3.xlarge) instances)
   * 2 larger VMs for Pulsar [brokers](reference-terminology.md#broker) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
   * 1 larger VMs for Pulsar [proxy](reference-terminology.md#proxy) ([c5.2xlarge](https://www.ec2instances.info/?selected=c5.2xlarge) instances)
diff --git a/site2/website-next/versioned_docs/version-2.4.2/deploy-bare-metal-multi-cluster.md b/site2/website-next/versioned_docs/version-2.4.2/deploy-bare-metal-multi-cluster.md
index 643c122..9dd2526 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/deploy-bare-metal-multi-cluster.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/deploy-bare-metal-multi-cluster.md
@@ -2,38 +2,30 @@
 id: deploy-bare-metal-multi-cluster
 title: Deploying a multi-cluster on bare metal
 sidebar_label: "Bare metal multi-cluster"
-original_id: deploy-bare-metal-multi-cluster
 ---
 
 :::tip
 
-1. Single-cluster Pulsar installations should be sufficient for all but the most ambitious use cases. If you are interested in experimenting with
-Pulsar or using it in a startup or on a single team, you had better opt for a single cluster. For instructions on deploying a single cluster,
-see the guide [here](deploy-bare-metal).
-2. If you want to use all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you need to download `apache-pulsar-io-connectors`
-package and install `apache-pulsar-io-connectors` under `connectors` directory in the pulsar directory on every broker node or on every function-worker node if you
-run a separate cluster of function workers for [Pulsar Functions](functions-overview).
-3. If you want to use [Tiered Storage](concepts-tiered-storage) feature in your Pulsar deployment, you need to download `apache-pulsar-offloaders`
-package and install `apache-pulsar-offloaders` under `offloaders` directory in the pulsar directory on every broker node. For more details of how to configure
-this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
+1. You can use single-cluster Pulsar installation in most use cases, such as experimenting with Pulsar or using Pulsar in a startup or in a single team. If you need to run a multi-cluster Pulsar instance, see the [guide](deploy-bare-metal-multi-cluster).
+2. If you want to use all built-in [Pulsar IO](io-overview.md) connectors, you need to download the `apache-pulsar-io-connectors` package and install `apache-pulsar-io-connectors` under `connectors` directory in the pulsar directory on every broker node or on every function-worker node if you have run a separate cluster of function workers for [Pulsar Functions](functions-overview).
+3. If you want to use [Tiered Storage](concepts-tiered-storage.md) feature in your Pulsar deployment, you need to download the `apache-pulsar-offloaders` package and install `apache-pulsar-offloaders` under `offloaders` directory in the Pulsar directory on every broker node. For more details of how to configure this feature, you can refer to the [Tiered storage cookbook](cookbooks-tiered-storage).
 
 :::
 
-A Pulsar *instance* consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance involves the following basic steps:
+A Pulsar instance consists of multiple Pulsar clusters working in unison. You can distribute clusters across data centers or geographical regions and replicate the clusters amongst themselves using [geo-replication](administration-geo). Deploying a multi-cluster Pulsar instance consists of the following steps:
 
-* Deploying two separate [ZooKeeper](#deploy-zookeeper) quorums: a [local](#deploy-local-zookeeper) quorum for each cluster in the instance and a [configuration store](#configuration-store) quorum for instance-wide tasks
-* Initializing [cluster metadata](#cluster-metadata-initialization) for each cluster
-* Deploying a [BookKeeper cluster](#deploy-bookkeeper) of bookies in each Pulsar cluster
-* Deploying [brokers](#deploy-brokers) in each Pulsar cluster
+1. Deploying two separate ZooKeeper quorums: a local quorum for each cluster in the instance and a configuration store quorum for instance-wide tasks
+2. Initializing cluster metadata for each cluster
+3. Deploying a BookKeeper cluster of bookies in each Pulsar cluster
+4. Deploying brokers in each Pulsar cluster
 
-If you want to deploy a single Pulsar cluster, see [Clusters and Brokers](getting-started-standalone.md#start-the-cluster).
 
 > #### Run Pulsar locally or on Kubernetes?
-> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes on [Google Kubernetes Engine](deploy-kubernetes#pulsar [...]
+> This guide shows you how to deploy Pulsar in production in a non-Kubernetes environment. If you want to run a standalone Pulsar cluster on a single machine for development purposes, see the [Setting up a local cluster](getting-started-standalone.md) guide. If you want to run Pulsar on [Kubernetes](https://kubernetes.io), see the [Pulsar on Kubernetes](deploy-kubernetes) guide, which includes sections on running Pulsar on Kubernetes, on Google Kubernetes Engine and on Amazon Web Services.
 
 ## System requirement
 
-Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. To use Pulsar, you need to install 64-bit JRE/JDK 8 or later versions.
+Currently, Pulsar is available for 64-bit **macOS**, **Linux**, and **Windows**. You need to install 64-bit JRE/JDK 8 or later versions.
 
 :::note
 
@@ -68,8 +60,6 @@ $ cd apache-pulsar-@pulsar:version@
 
 ```
 
-## What your package contains
-
 The Pulsar binary package initially contains the following directories:
 
 Directory | Contains
@@ -93,17 +83,17 @@ Directory | Contains
 
 Each Pulsar instance relies on two separate ZooKeeper quorums.
 
-* [Local ZooKeeper](#deploy-local-zookeeper) operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs to have a dedicated ZooKeeper cluster.
-* [Configuration Store](#deploy-the-configuration-store) operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
+* Local ZooKeeper operates at the cluster level and provides cluster-specific configuration management and coordination. Each Pulsar cluster needs a dedicated ZooKeeper cluster.
+* Configuration Store operates at the instance level and provides configuration management for the entire system (and thus across clusters). An independent cluster of machines or the same machines that local ZooKeeper uses can provide the configuration store quorum.
 
-The configuration store quorum can be provided by an independent cluster of machines or by the same machines used by local ZooKeeper.
+You can use an independent cluster of machines or the same machines used by local ZooKeeper to provide the configuration store quorum.
 
 
 ### Deploy local ZooKeeper
 
 ZooKeeper manages a variety of essential coordination-related and configuration-related tasks for Pulsar.
 
-You need to stand up one local ZooKeeper cluster *per Pulsar cluster* for deploying a Pulsar instance. 
+You need to stand up one local ZooKeeper cluster per Pulsar cluster for deploying a Pulsar instance. 
 
 To begin, add all ZooKeeper servers to the quorum configuration specified in the [`conf/zookeeper.conf`](reference-configuration.md#zookeeper) file. Add a `server.N` line for each node in the cluster to the configuration, where `N` is the number of the ZooKeeper node. The following is an example for a three-node cluster:
 
@@ -117,7 +107,11 @@ server.3=zk3.us-west.example.com:2888:3888
 
 On each host, you need to specify the ID of the node in the `myid` file of each node, which is in `data/zookeeper` folder of each server by default (you can change the file location via the [`dataDir`](reference-configuration.md#zookeeper-dataDir) parameter).
 
-> See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+:::tip
+
+See the [Multi-server setup guide](https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup) in the ZooKeeper documentation for detailed information on `myid` and more.
+
+:::
 
 On a ZooKeeper server at `zk1.us-west.example.com`, for example, you could set the `myid` value like this:
 
@@ -140,15 +134,15 @@ $ bin/pulsar-daemon start zookeeper
 
 ### Deploy the configuration store 
 
-The ZooKeeper cluster that is configured and started up in the section above is a *local* ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
+The ZooKeeper cluster configured and started up in the section above is a local ZooKeeper cluster that you can use to manage a single Pulsar cluster. In addition to a local cluster, however, a full Pulsar instance also requires a configuration store for handling some instance-level configuration and coordination tasks.
 
-If you deploy a [single-cluster](#single-cluster-pulsar-instance) instance, you do not need a separate cluster for the configuration store. If, however, you deploy a [multi-cluster](#multi-cluster-pulsar-instance) instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
+If you deploy a single-cluster instance, you do not need a separate cluster for the configuration store. If, however, you deploy a multi-cluster instance, you should stand up a separate ZooKeeper cluster for configuration tasks.
 
 #### Single-cluster Pulsar instance
 
 If your Pulsar instance consists of just one cluster, then you can deploy a configuration store on the same machines as the local ZooKeeper quorum but run on different TCP ports.
 
-To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses to the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
+To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers that the local quorum uses. You need to use the configuration file in [`conf/global_zookeeper.conf`](reference-configuration.md#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). The following is an example that uses port 2184 for a three-node ZooKeeper cluster:
 
 ```properties
 
@@ -165,13 +159,11 @@ As before, create the `myid` files for each server on `data/global-zookeeper/myi
 
 When you deploy a global Pulsar instance, with clusters distributed across different geographical regions, the configuration store serves as a highly available and strongly consistent metadata store that can tolerate failures and partitions spanning whole regions.
 
-The key here is to make sure the ZK quorum members are spread across at least 3 regions and that other regions run as observers.
+The key here is to make sure the ZK quorum members are spread across at least 3 regions, and other regions run as observers.
 
-Again, given the very low expected load on the configuration store servers, you can
-share the same hosts used for the local ZooKeeper quorum.
+Again, given the very low expected load on the configuration store servers, you can share the same hosts used for the local ZooKeeper quorum.
 
-For example, assume a Pulsar instance with the following clusters `us-west`,
-`us-east`, `us-central`, `eu-central`, `ap-south`. Also assume, each cluster has its own local ZK servers named such as the following: 
+For example, assume a Pulsar instance with the following clusters: `us-west`, `us-east`, `us-central`, `eu-central`, `ap-south`. Also assume that each cluster has its own local ZK servers named as follows: 
 
 ```
 
@@ -179,8 +171,7 @@ zk[1-3].${CLUSTER}.example.com
 
 ```
 
-In this scenario if you want to pick the quorum participants from few clusters and
-let all the others be ZK observers. For example, to form a 7 servers quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
+In this scenario, you can pick the quorum participants from a few clusters and let all the others be ZK observers. For example, to form a 7-server quorum, you can pick 3 servers from `us-west`, 2 from `us-central` and 2 from `us-east`.
 
 This method guarantees that writes to the configuration store are possible even if one of these regions is unreachable.
 
@@ -227,7 +218,7 @@ $ bin/pulsar-daemon start configuration-store
 
 ## Cluster metadata initialization
 
-Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **you only needs to write these metadata once**.
+Once you set up the cluster-specific ZooKeeper and configuration store quorums for your instance, you need to write some metadata to ZooKeeper for each cluster in your instance. **You only need to write this metadata once**.
 
 You can initialize this metadata using the [`initialize-cluster-metadata`](reference-cli-tools.md#pulsar-initialize-cluster-metadata) command of the [`pulsar`](reference-cli-tools.md#pulsar) CLI tool. The following is an example:
 
@@ -260,7 +251,7 @@ Make sure to run `initialize-cluster-metadata` for each cluster in your instance
 
 BookKeeper provides [persistent message storage](concepts-architecture-overview.md#persistent-storage) for Pulsar.
 
-Each Pulsar broker needs to have its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
+Each Pulsar broker needs its own cluster of bookies. The BookKeeper cluster shares a local ZooKeeper quorum with the Pulsar cluster.
 
 ### Configure bookies
 
@@ -280,7 +271,7 @@ $ bin/pulsar-daemon start bookie
 
 You can verify that the bookie works properly using the `bookiesanity` command for the [BookKeeper shell](reference-cli-tools.md#bookkeeper-shell):
 
-```shell
+```bash
 
 $ bin/bookkeeper shell bookiesanity
 
@@ -304,7 +295,7 @@ Bookie hosts are responsible for storing message data on disk. In order for book
 Message entries written to bookies are always synced to disk before returning an acknowledgement to the Pulsar broker. To ensure low write latency, BookKeeper is
 designed to use multiple devices:
 
-* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID)s controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
+* A **journal** to ensure durability. For sequential writes, having fast [fsync](https://linux.die.net/man/2/fsync) operations on bookie hosts is critical. Typically, small and fast [solid-state drives](https://en.wikipedia.org/wiki/Solid-state_drive) (SSDs) should suffice, or [hard disk drives](https://en.wikipedia.org/wiki/Hard_disk_drive) (HDDs) with a [RAID](https://en.wikipedia.org/wiki/RAID) controller and a battery-backed write cache. Both solutions can reach fsync latency of ~0.4 ms.
 * A **ledger storage device** is where data is stored until all consumers acknowledge the message. Writes happen in the background, so write I/O is not a big concern. Reads happen sequentially most of the time and the backlog is drained only in case of consumer drain. To store large amounts of data, a typical configuration involves multiple HDDs with a RAID controller.
 
 
@@ -371,39 +362,13 @@ $ bin/pulsar broker
 
 ## Service discovery
 
-[Clients](getting-started-clients) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions [immediately below](#service-discovery-setup).
+[Clients](getting-started-clients) connecting to Pulsar brokers need to communicate with an entire Pulsar instance using a single URL.
 
-You can also use your own service discovery system if you want. If you use your own system, you only need to satisfy just one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
+You can use your own service discovery system. If you use your own system, you only need to satisfy one requirement: when a client performs an HTTP request to an [endpoint](reference-configuration) for a Pulsar cluster, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to some active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
 
-> #### Service discovery already provided by many scheduling systems
+> **Service discovery already provided by many scheduling systems**
 > Many large-scale deployment systems, such as [Kubernetes](deploy-kubernetes), have service discovery systems built in. If you run Pulsar on such a system, you may not need to provide your own service discovery mechanism.
 
-
-### Service discovery setup
-
-The service discovery mechanism that included with Pulsar maintains a list of active brokers, which stored in ZooKeeper, and supports lookup using HTTP and also the [binary protocol](developing-binary-protocol) of Pulsar.
-
-To get started setting up the built-in service of discovery of Pulsar, you need to change a few parameters in the [`conf/discovery.conf`](reference-configuration.md#service-discovery) configuration file. Set the [`zookeeperServers`](reference-configuration.md#service-discovery-zookeeperServers) parameter to the ZooKeeper quorum connection string of the cluster and the [`configurationStoreServers`](reference-configuration.md#service-discovery-configurationStoreServers) setting to the [con [...]
-store](reference-terminology.md#configuration-store) quorum connection string.
-
-```properties
-
-# Zookeeper quorum connection string
-zookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
-
-# Global configuration store connection string
-configurationStoreServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
-
-```
-
-To start the discovery service:
-
-```shell
-
-$ bin/pulsar-daemon start discovery
-
-```
-
 ## Admin client and verification
 
 At this point your Pulsar instance should be ready to use. You can now configure client machines that can serve as [administrative clients](admin-api-overview) for each cluster. You can use the [`conf/client.conf`](reference-configuration.md#client) configuration file to configure admin clients.
diff --git a/site2/website-next/versioned_docs/version-2.4.1/develop-bare-metal.md b/site2/website-next/versioned_docs/version-2.4.2/deploy-bare-metal.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.1/develop-bare-metal.md
rename to site2/website-next/versioned_docs/version-2.4.2/deploy-bare-metal.md
diff --git a/site2/website-next/versioned_docs/version-2.4.2/deploy-dcos.md b/site2/website-next/versioned_docs/version-2.4.2/deploy-dcos.md
index f5f8d1f..07f446e 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/deploy-dcos.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/deploy-dcos.md
@@ -7,18 +7,17 @@ original_id: deploy-dcos
 
 :::tip
 
-If you want to enable all builtin [Pulsar IO](io-overview) connectors in your Pulsar deployment, you can choose to use `apachepulsar/pulsar-all` image instead of
-`apachepulsar/pulsar` image. `apachepulsar/pulsar-all` image has already bundled [all builtin connectors](io-overview.md#working-with-connectors).
+To enable all built-in [Pulsar IO](io-overview) connectors in your Pulsar deployment, we recommend you use the `apachepulsar/pulsar-all` image instead of the `apachepulsar/pulsar` image; the former already bundles [all built-in connectors](io-overview.md#working-with-connectors).
 
 :::
 
-[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system used for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool that [Mesosphere](https://mesosphere.com/) creates and maintains .
+[DC/OS](https://dcos.io/) (the <strong>D</strong>ata<strong>C</strong>enter <strong>O</strong>perating <strong>S</strong>ystem) is a distributed operating system for deploying and managing applications and systems on [Apache Mesos](http://mesos.apache.org/). DC/OS is an open-source tool created and maintained by [Mesosphere](https://mesosphere.com/).
 
 Apache Pulsar is available as a [Marathon Application Group](https://mesosphere.github.io/marathon/docs/application-groups.html), which runs multiple applications as manageable sets.
 
 ## Prerequisites
 
-In order to run Pulsar on DC/OS, you need the following:
+You need to prepare your environment before running Pulsar on DC/OS.
 
 * DC/OS version [1.9](https://docs.mesosphere.com/1.9/) or higher
 * A [DC/OS cluster](https://docs.mesosphere.com/1.9/installing/) with at least three agent nodes
@@ -37,7 +36,7 @@ Each node in the DC/OS-managed Mesos cluster must have at least:
 * 4 GB of memory
 * 60 GB of total persistent disk
 
-Alternatively, you can change the configuration in `PulsarGroups.json` according to match your resources of DC/OS cluster.
+Alternatively, you can change the configuration in `PulsarGroups.json` to match the resources of your DC/OS cluster.
 
 ## Deploy Pulsar using the DC/OS command interface
 
@@ -56,9 +55,9 @@ This command deploys Docker container instances in three groups, which together
 * 1 [Prometheus](http://prometheus.io/) instance and 1 [Grafana](https://grafana.com/) instance
 
 
-> When you run DC/OS, a ZooKeeper cluster already runs at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
+> When you run DC/OS, a ZooKeeper cluster will be running at `master.mesos:2181`, thus you do not have to install or start up ZooKeeper separately.
 
-After executing the `dcos` command above, click on the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications in the process of deploying.
+After executing the `dcos` command above, click the **Services** tab in the DC/OS [GUI interface](https://docs.mesosphere.com/latest/gui/), which you can access at [http://m1.dcos](http://m1.dcos) in this example. You should see several applications during the deployment.
 
 ![DC/OS command executed](/assets/dcos_command_execute.png)
 
@@ -66,15 +65,15 @@ After executing the `dcos` command above, click on the **Services** tab in the D
 
 ## The BookKeeper group
 
-To monitor the status of the BookKeeper cluster deployment, click on the **bookkeeper** group in the parent **pulsar** group.
+To monitor the status of the BookKeeper cluster deployment, click the **bookkeeper** group in the parent **pulsar** group.
 
 ![DC/OS bookkeeper status](/assets/dcos_bookkeeper_status.png)
 
-At this point, 3 [bookies](reference-terminology.md#bookie) should be shown as green, which means that the bookies have been deployed successfully and are now running.
+At this point, the status of the 3 [bookies](reference-terminology.md#bookie) is green, which means that the bookies have been deployed successfully and are running.
  
 ![DC/OS bookkeeper running](/assets/dcos_bookkeeper_run.png)
  
-You can also click into each bookie instance to get more detailed information, such as the bookie running log.
+You can also click each bookie instance to get more detailed information, such as the bookie running log.
 
 ![DC/OS bookie log](/assets/dcos_bookie_log.png)
 
@@ -82,23 +81,23 @@ To display information about the BookKeeper in ZooKeeper, you can visit [http://
 
 ![DC/OS bookkeeper in zk](/assets/dcos_bookkeeper_in_zookeeper.png)
 
-## The Pulsar broker Group
+## The Pulsar broker group
 
-Similar to the BookKeeper group above, click into the **brokers** to check the status of the Pulsar brokers.
+Similar to the BookKeeper group above, click **brokers** to check the status of the Pulsar brokers.
 
 ![DC/OS broker status](/assets/dcos_broker_status.png)
 
 ![DC/OS broker running](/assets/dcos_broker_run.png)
 
-You can also click into each broker instance to get more detailed information, such as the broker running log.
+You can also click each broker instance to get more detailed information, such as the broker running log.
 
 ![DC/OS broker log](/assets/dcos_broker_log.png)
 
-Broker cluster information in Zookeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
+Broker cluster information in ZooKeeper is also available through the web UI. In this example, you can see that the `loadbalance` and `managed-ledgers` directories have been created.
 
 ![DC/OS broker in zk](/assets/dcos_broker_in_zookeeper.png)
 
-## Monitor Group
+## Monitor group
 
 The **monitory** group consists of Prometheus and Grafana.
 
@@ -106,17 +105,17 @@ The **monitory** group consists of Prometheus and Grafana.
 
 ### Prometheus
 
-Click into the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
+Click the instance of `prom` to get the endpoint of Prometheus, which is `192.168.65.121:9090` in this example.
 
 ![DC/OS prom endpoint](/assets/dcos_prom_endpoint.png)
 
-If you click that endpoint, you can see the Prometheus dashboard. The [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets) URL display all the bookies and brokers.
+If you click that endpoint, you can see the Prometheus dashboard. All the bookies and brokers are listed on [http://192.168.65.121:9090/targets](http://192.168.65.121:9090/targets).
 
 ![DC/OS prom targets](/assets/dcos_prom_targets.png)
 
 ### Grafana
 
-Click into `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
+Click `grafana` to get the endpoint for Grafana, which is `192.168.65.121:3000` in this example.
  
 ![DC/OS grafana endpoint](/assets/dcos_grafana_endpoint.png)
 
@@ -130,7 +129,7 @@ Now that you have a fully deployed Pulsar cluster, you can run a simple consumer
 
 ### Download and prepare the Pulsar Java tutorial
 
-You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file of the repo).
+You can clone a [Pulsar Java tutorial](https://github.com/streamlio/pulsar-java-tutorial) repo. This repo contains a simple Pulsar consumer and producer (you can find more information in the `README` file in this repo).
 
 ```bash
 
@@ -138,12 +137,13 @@ $ git clone https://github.com/streamlio/pulsar-java-tutorial
 
 ```
 
-Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java).
-The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent, which runs a broker. The client agent IP address can also replace this.
+Change the `SERVICE_URL` from `pulsar://localhost:6650` to `pulsar://a1.dcos:6650` in both [`ConsumerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ConsumerTutorial.java) file and [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file.
 
-Now, change the message number from 10 to 10000000 in main method of [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) so that it can produce more messages.
+The `pulsar://a1.dcos:6650` endpoint is for the broker service. You can fetch the endpoint details for each broker instance from the DC/OS GUI. `a1.dcos` is a DC/OS client agent that runs a broker, and you can replace it with the client agent IP address.
 
-Now compile the project code using the command below:
+Now, you can change the message number from 10 to 10000000 in the main method in [`ProducerTutorial.java`](https://github.com/streamlio/pulsar-java-tutorial/blob/master/src/main/java/tutorial/ProducerTutorial.java) file to produce more messages.
+
+Then, you can compile the project code using the command below:
 
 ```bash
 
@@ -169,7 +169,7 @@ $ mvn exec:java -Dexec.mainClass="tutorial.ProducerTutorial"
 
 ```
 
-You can see the producer producing messages and the consumer consuming messages through the DC/OS GUI.
+You see that the producer is producing messages and the consumer is consuming messages through the DC/OS GUI.
 
 ![DC/OS pulsar producer](/assets/dcos_producer.png)
 
@@ -177,20 +177,20 @@ You can see the producer producing messages and the consumer consuming messages
 
 ### View Grafana metric output
 
-While the producer and consumer run, you can access running metrics information from Grafana.
+While the producer and consumer are running, you can access the running metrics from Grafana.
 
 ![DC/OS pulsar dashboard](/assets/dcos_metrics.png)
 
 
 ## Uninstall Pulsar
 
-You can shut down and uninstall the `pulsar` application from DC/OS at any time in the following two ways:
+You can shut down and uninstall the `pulsar` application from DC/OS at any time in one of the following two ways:
 
-1. Using the DC/OS GUI, you can choose **Delete** at the right end of Pulsar group.
+1. Click the three dots at the right end of Pulsar group and choose **Delete** on the DC/OS GUI.
 
    ![DC/OS pulsar uninstall](/assets/dcos_uninstall.png)
 
-2. You can use the following command:
+2. Use the command below.
 
    ```bash
    
diff --git a/site2/website-next/versioned_docs/version-2.4.2/deploy-kubernetes.md b/site2/website-next/versioned_docs/version-2.4.2/deploy-kubernetes.md
index dc7123d..4e170dc 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/deploy-kubernetes.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/deploy-kubernetes.md
@@ -2,7 +2,6 @@
 id: deploy-kubernetes
 title: Deploy Pulsar on Kubernetes
 sidebar_label: "Kubernetes"
-original_id: deploy-kubernetes
 ---
 
 To get up and running with these charts as fast as possible, in a **non-production** use case, we provide
diff --git a/site2/website-next/versioned_docs/version-2.4.2/deploy-monitoring.md b/site2/website-next/versioned_docs/version-2.4.2/deploy-monitoring.md
index 074ce3f..95ccdd6 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/deploy-monitoring.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/deploy-monitoring.md
@@ -2,7 +2,6 @@
 id: deploy-monitoring
 title: Monitor
 sidebar_label: "Monitor"
-original_id: deploy-monitoring
 ---
 
 You can use different ways to monitor a Pulsar cluster, exposing both metrics related to the usage of topics and the overall health of the individual components of the cluster.
@@ -127,17 +126,7 @@ The per-topic dashboard instructions are available at [Pulsar manager](administr
 
 You can use grafana to create dashboard driven by the data that is stored in Prometheus.
 
-When you deploy Pulsar on Kubernetes, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
-
-Enter the command below to use the dashboard manually:
-
-```shell
-
-docker run -p3000:3000 \
-        -e PROMETHEUS_URL=http://$PROMETHEUS_HOST:9090/ \
-        apachepulsar/pulsar-grafana:latest
-
-```
+When you deploy Pulsar on Kubernetes with the Pulsar Helm Chart, a `pulsar-grafana` Docker image is enabled by default. You can use the docker image with the principal dashboards.
 
 The following are some Grafana dashboards examples:
 
@@ -145,4 +134,4 @@ The following are some Grafana dashboards examples:
 - [apache-pulsar-grafana-dashboard](https://github.com/streamnative/apache-pulsar-grafana-dashboard): a collection of Grafana dashboard templates for different Pulsar components running on both Kubernetes and on-premise machines.
 
 ## Alerting rules
-You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+You can set alerting rules according to your Pulsar environment. To configure alerting rules for Apache Pulsar, refer to [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.4.2/develop-binary-protocol.md b/site2/website-next/versioned_docs/version-2.4.2/develop-binary-protocol.md
index b233f10..fa03383 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/develop-binary-protocol.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/develop-binary-protocol.md
@@ -2,7 +2,6 @@
 id: develop-binary-protocol
 title: Pulsar binary protocol specification
 sidebar_label: "Binary protocol"
-original_id: develop-binary-protocol
 ---
 
 Pulsar uses a custom binary protocol for communications between producers/consumers and brokers. This protocol is designed to support required features, such as acknowledgements and flow control, while ensuring maximum transport and implementation efficiency.
@@ -29,42 +28,63 @@ The Pulsar protocol allows for two types of commands:
 
 Simple (payload-free) commands have this basic structure:
 
-| Component   | Description                                                                             | Size (in bytes) |
-|:------------|:----------------------------------------------------------------------------------------|:----------------|
-| totalSize   | The size of the frame, counting everything that comes after it (in bytes)               | 4               |
-| commandSize | The size of the protobuf-serialized command                                             | 4               |
-| message     | The protobuf message serialized in a raw binary format (rather than in protobuf format) |                 |
+| Component     | Description                                                                             | Size (in bytes) |
+|:--------------|:----------------------------------------------------------------------------------------|:----------------|
+| `totalSize`   | The size of the frame, counting everything that comes after it (in bytes)               | 4               |
+| `commandSize` | The size of the protobuf-serialized command                                             | 4               |
+| `message`     | The protobuf message serialized in a raw binary format (rather than in protobuf format) |                 |
 
 ### Payload commands
 
 Payload commands have this basic structure:
 
-| Component    | Description                                                                                 | Size (in bytes) |
-|:-------------|:--------------------------------------------------------------------------------------------|:----------------|
-| totalSize    | The size of the frame, counting everything that comes after it (in bytes)                   | 4               |
-| commandSize  | The size of the protobuf-serialized command                                                 | 4               |
-| message      | The protobuf message serialized in a raw binary format (rather than in protobuf format)     |                 |
-| magicNumber  | A 2-byte byte array (`0x0e01`) identifying the current format                               | 2               |
-| checksum     | A [CRC32-C checksum](http://www.evanjones.ca/crc32c.html) of everything that comes after it | 4               |
-| metadataSize | The size of the message [metadata](#message-metadata)                                       | 4               |
-| metadata     | The message [metadata](#message-metadata) stored as a binary protobuf message               |                 |
-| payload      | Anything left in the frame is considered the payload and can include any sequence of bytes  |                 |
+| Component                          | Required or optional| Description                                                                                 | Size (in bytes) |
+|:-----------------------------------|:----------|:--------------------------------------------------------------------------------------------|:----------------|
+| `totalSize`                        | Required  | The size of the frame, counting everything that comes after it (in bytes)                   | 4               |
+| `commandSize`                      | Required  | The size of the protobuf-serialized command                                                 | 4               |
+| `message`                          | Required  | The protobuf message serialized in a raw binary format (rather than in protobuf format)     |                 |
+| `magicNumberOfBrokerEntryMetadata` | Optional  | A 2-byte byte array (`0x0e02`) identifying the broker entry metadata   <br /> **Note**: `magicNumberOfBrokerEntryMetadata` , `brokerEntryMetadataSize`, and `brokerEntryMetadata` should be used **together**.                     | 2               |
+| `brokerEntryMetadataSize`          | Optional  | The size of the broker entry metadata                                                       | 4               |
+| `brokerEntryMetadata`              | Optional  | The broker entry metadata stored as a binary protobuf message                               |                 |
+| `magicNumber`                      | Required  | A 2-byte byte array (`0x0e01`) identifying the current format                               | 2               |
+| `checksum`                         | Required  | A [CRC32-C checksum](http://www.evanjones.ca/crc32c.html) of everything that comes after it | 4               |
+| `metadataSize`                     | Required  | The size of the message [metadata](#message-metadata)                                       | 4               |
+| `metadata`                         | Required  | The message [metadata](#message-metadata) stored as a binary protobuf message               |                 |
+| `payload`                          | Required  | Anything left in the frame is considered the payload and can include any sequence of bytes  |                 |
+
+## Broker entry metadata
+
+Broker entry metadata is stored alongside the message metadata as a serialized protobuf message.
+It is created by the broker when the message arrives at the broker and is passed without changes to the consumer, if configured.
+
+| Field              | Required or optional       | Description                                                                                                                   |
+|:-------------------|:----------------|:------------------------------------------------------------------------------------------------------------------------------|
+| `broker_timestamp` | Optional        | The timestamp when a message arrives at the broker (i.e. the number of milliseconds since January 1st, 1970, in UTC)      |
+| `index`            | Optional        | The index of the message. It is assigned by the broker. |
+
+If you want to use broker entry metadata for **brokers**, configure the [`brokerEntryMetadataInterceptors`](reference-configuration.md#broker) parameter in the `broker.conf` file.
+
+If you want to use broker entry metadata for **consumers**:
+
+1. Use the client protocol version [18 or later](https://github.com/apache/pulsar/blob/ca37e67211feda4f7e0984e6414e707f1c1dfd07/pulsar-common/src/main/proto/PulsarApi.proto#L259).
+   
+2. Configure the [`brokerEntryMetadataInterceptors`](reference-configuration.md#broker) parameter and set the [`enableExposingBrokerEntryMetadataToClient`](reference-configuration.md#broker) parameter to `true` in the `broker.conf` file.
 
 ## Message metadata
 
-Message metadata is stored alongside the application-specified payload as a serialized protobuf message. Metadata is created by the producer and passed on unchanged to the consumer.
+Message metadata is stored alongside the application-specified payload as a serialized protobuf message. Metadata is created by the producer and passed without changes to the consumer.
 
-| Field                                | Description                                                                                                                                                                                                                                               |
-|:-------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `producer_name`                      | The name of the producer that published the message                                                                                                                                                                                         |
-| `sequence_id`                        | The sequence ID of the message, assigned by producer                                                                                                                                                                                        |
-| `publish_time`                       | The publish timestamp in Unix time (i.e. as the number of milliseconds since January 1st, 1970 in UTC)                                                                                                                                                    |
-| `properties`                         | A sequence of key/value pairs (using the [`KeyValue`](https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/proto/PulsarApi.proto#L32) message). These are application-defined keys and values with no special meaning to Pulsar. |
-| `replicated_from` *(optional)*       | Indicates that the message has been replicated and specifies the name of the [cluster](reference-terminology.md#cluster) where the message was originally published                                                                                                             |
-| `partition_key` *(optional)*         | While publishing on a partition topic, if the key is present, the hash of the key is used to determine which partition to choose                                                                                                                          |
-| `compression` *(optional)*           | Signals that payload has been compressed and with which compression library                                                                                                                                                                               |
-| `uncompressed_size` *(optional)*     | If compression is used, the producer must fill the uncompressed size field with the original payload size                                                                                                                                                 |
-| `num_messages_in_batch` *(optional)* | If this message is really a [batch](#batch-messages) of multiple entries, this field must be set to the number of messages in the batch                                                                                                                   |
+| Field                    | Required or optional | Description                                                                                                                                                                                                                                               |
+|:-------------------------|:----------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `producer_name`          | Required  | The name of the producer that published the message                                                                                                                                                                                         |
+| `sequence_id`            | Required  | The sequence ID of the message, assigned by producer                                                                                                                                                                                        |
+| `publish_time`           | Required  | The publish timestamp in Unix time (i.e. as the number of milliseconds since January 1st, 1970 in UTC)                                                                                                                                                    |
+| `properties`             | Required  | A sequence of key/value pairs (using the [`KeyValue`](https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/proto/PulsarApi.proto#L32) message). These are application-defined keys and values with no special meaning to Pulsar. |
+| `replicated_from`        | Optional  |  Indicates that the message has been replicated and specifies the name of the [cluster](reference-terminology.md#cluster) where the message was originally published                                                                                                             |
+| `partition_key`          | Optional  | While publishing on a partition topic, if the key is present, the hash of the key is used to determine which partition to choose. Partition key is used as the message key.                                                                                                                          |
+| `compression`            | Optional  | Signals that payload has been compressed and with which compression library                                                                                                                                                                               |
+| `uncompressed_size`      | Optional  | If compression is used, the producer must fill the uncompressed size field with the original payload size                                                                                                                                                 |
+| `num_messages_in_batch`  | Optional  | If this message is really a [batch](#batch-messages) of multiple entries, this field must be set to the number of messages in the batch                                                                                                                   |
 
 ### Batch messages
 
@@ -76,19 +96,19 @@ object.
 For a single batch, the payload format will look like this:
 
 
-| Field         | Description                                                 |
-|:--------------|:------------------------------------------------------------|
-| metadataSizeN | The size of the single message metadata serialized Protobuf |
-| metadataN     | Single message metadata                                     |
-| payloadN      | Message payload passed by application                       |
+| Field           | Required or optional | Description                                                |
+|:----------------|:---------------------|:-----------------------------------------------------------|
+| `metadataSizeN` | Required             |The size of the single message metadata serialized Protobuf |
+| `metadataN`     | Required             |Single message metadata                                     |
+| `payloadN`      | Required             |Message payload passed by application                       |
 
 Each metadata field looks like this;
 
-| Field                      | Description                                             |
-|:---------------------------|:--------------------------------------------------------|
-| properties                 | Application-defined properties                          |
-| partition key *(optional)* | Key to indicate the hashing to a particular partition   |
-| payload_size               | Size of the payload for the single message in the batch |
+| Field           | Required or optional  | Description                                             |
+|:----------------|:----------------------|:--------------------------------------------------------|
+| `properties`    | Required              | Application-defined properties                          |
+| `partition key` | Optional              | Key to indicate the hashing to a particular partition   |
+| `payload_size`  | Required              | Size of the payload for the single message in the batch |
 
 When compression is enabled, the whole batch will be compressed at once.
 
@@ -170,6 +190,10 @@ messages to the broker, referring to the producer id negotiated before.
 
 ![Producer interaction](/assets/binary-protocol-producer.png)
 
+If the client does not receive a response indicating producer creation success or failure,
+the client should first send a command to close the original producer before sending a
+command to re-attempt producer creation.
+
 ##### Command Producer
 
 ```protobuf
@@ -273,6 +297,11 @@ When receiving a `CloseProducer` command, the broker will stop accepting any
 more messages for the producer, wait until all pending messages are persisted
 and then reply `Success` to the client.
 
+If the client does not receive a response to a `Producer` command within a timeout,
+the client must first send a `CloseProducer` command before sending another
+`Producer` command. The client does not need to await a response to the `CloseProducer`
+command before sending the next `Producer` command.
+
 The broker can send a `CloseProducer` command to client when it's performing
 a graceful failover (eg: broker is being restarted, or the topic is being unloaded
 by load balancer to be transferred to a different broker).
diff --git a/site2/website-next/versioned_docs/version-2.4.2/develop-load-manager.md b/site2/website-next/versioned_docs/version-2.4.2/develop-load-manager.md
index 509209b..9687f30 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/develop-load-manager.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/develop-load-manager.md
@@ -2,7 +2,6 @@
 id: develop-load-manager
 title: Modular load manager
 sidebar_label: "Modular load manager"
-original_id: develop-load-manager
 ---
 
 The *modular load manager*, implemented in  [`ModularLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/ModularLoadManagerImpl.java), is a flexible alternative to the previously implemented load manager, [`SimpleLoadManagerImpl`](https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/loadbalance/impl/SimpleLoadManagerImpl.java), which attempts to simplify how load  [...]
diff --git a/site2/website-next/versioned_docs/version-2.4.2/develop-tools.md b/site2/website-next/versioned_docs/version-2.4.2/develop-tools.md
index b545779..d034926 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/develop-tools.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/develop-tools.md
@@ -2,7 +2,6 @@
 id: develop-tools
 title: Simulation tools
 sidebar_label: "Simulation tools"
-original_id: develop-tools
 ---
 
 It is sometimes necessary to create a test environment and incur artificial load to observe how well load managers
diff --git a/site2/website-next/versioned_docs/version-2.4.2/io-cdc.md b/site2/website-next/versioned_docs/version-2.4.2/io-cdc.md
index 20f16ae..df37397 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/io-cdc.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/io-cdc.md
@@ -2,7 +2,6 @@
 id: io-cdc
 title: CDC connector
 sidebar_label: "CDC connector"
-original_id: io-cdc
 ---
 
 CDC source connectors capture log changes of databases (such as MySQL, MongoDB, and PostgreSQL) into Pulsar.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/io-overview.md b/site2/website-next/versioned_docs/version-2.4.2/io-overview.md
index 68960a8..810de78 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/io-overview.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/io-overview.md
@@ -2,7 +2,6 @@
 id: io-overview
 title: Pulsar connector overview
 sidebar_label: "Overview"
-original_id: io-overview
 ---
 
 import Tabs from '@theme/Tabs';
diff --git a/site2/website-next/versioned_docs/version-2.4.2/pulsar-2.0.md b/site2/website-next/versioned_docs/version-2.4.2/pulsar-2.0.md
index 11c5e66..560c8c1 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/pulsar-2.0.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/pulsar-2.0.md
@@ -2,7 +2,6 @@
 id: pulsar-2.0
 title: Pulsar 2.0
 sidebar_label: "Pulsar 2.0"
-original_id: pulsar-2.0
 ---
 
 Pulsar 2.0 is a major new release for Pulsar that brings some bold changes to the platform, including [simplified topic names](#topic-names), the addition of the [Pulsar Functions](functions-overview) feature, some terminology changes, and more.
diff --git a/site2/website-next/versioned_docs/version-2.4.2/reference-pulsar-admin.md b/site2/website-next/versioned_docs/version-2.4.2/pulsar-admin.md
similarity index 100%
rename from site2/website-next/versioned_docs/version-2.4.2/reference-pulsar-admin.md
rename to site2/website-next/versioned_docs/version-2.4.2/pulsar-admin.md
diff --git a/site2/website-next/versioned_docs/version-2.4.2/reference-cli-tools.md b/site2/website-next/versioned_docs/version-2.4.2/reference-cli-tools.md
index 8c2c64f..0c8aea1 100644
--- a/site2/website-next/versioned_docs/version-2.4.2/reference-cli-tools.md
+++ b/site2/website-next/versioned_docs/version-2.4.2/reference-cli-tools.md
@@ -2,7 +2,6 @@
 id: reference-cli-tools
 title: Pulsar command-line tools
 sidebar_label: "Pulsar CLI tools"
-original_id: reference-cli-tools
 ---
 
 Pulsar offers several command-line tools that you can use for managing Pulsar installations, performance testing, using command-line producers and consumers, and more.
@@ -16,8 +15,12 @@ All Pulsar command-line tools can be run from the `bin` directory of your [insta
 * [`bookkeeper`](#bookkeeper)
 * [`broker-tool`](#broker-tool)
 
-> ### Getting help
-> You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> **Important** 
+>
+> - This page only shows **some frequently used commands**. For the latest information about `pulsar`, `pulsar-client`, and `pulsar-perf`, including commands, flags, descriptions, and more information, see [Pulsar tools](https://pulsar.apache.org/tools/).
+>  
+> - You can get help for any CLI tool, command, or subcommand using the `--help` flag, or `-h` for short. Here's an example:
+> 
 
 > ```shell
... 810 lines suppressed ...

[pulsar-site] 03/04: update migrate scripts

Posted by ur...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

urfree pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/pulsar-site.git

commit 7b09a265d26186354270e920db87fe42a207f3c3
Author: LiLi <ur...@apache.org>
AuthorDate: Thu Feb 17 16:29:56 2022 +0800

    update migrate scripts
    
    Signed-off-by: LiLi <ur...@apache.org>
---
 site2/website-next/migrate/migrate-docs.js | 4 ++++
 site2/website-next/migrate/tool/find-md.js | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/site2/website-next/migrate/migrate-docs.js b/site2/website-next/migrate/migrate-docs.js
index 31f09a1..9b727c0 100644
--- a/site2/website-next/migrate/migrate-docs.js
+++ b/site2/website-next/migrate/migrate-docs.js
@@ -11,6 +11,7 @@ function migrate(version, chapter, docsId, cb) {
   if (version == "next") {
     dest = "../../" + next.docsDir;
   }
+  let destDir = path.join(__dirname, dest);
   dest = path.join(__dirname, dest, docsId + ".md");
   let mdpath = findMd(version, docsId);
   if (mdpath) {
@@ -27,6 +28,9 @@ function migrate(version, chapter, docsId, cb) {
     );
     let data = fs.readFileSync(mdpath, "utf8");
     data = fixMd(data, version);
+    if (!fs.existsSync(destDir)) {
+      fs.mkdirSync(destDir);
+    }
     fs.writeFileSync(dest, data);
     cb && cb(docsId);
   } else {
diff --git a/site2/website-next/migrate/tool/find-md.js b/site2/website-next/migrate/tool/find-md.js
index 1429be7..98b5268 100644
--- a/site2/website-next/migrate/tool/find-md.js
+++ b/site2/website-next/migrate/tool/find-md.js
@@ -50,7 +50,7 @@ const _search = (dir, version, docsId, reg) => {
     }
     let data = fs.readFileSync(pathname, "utf8");
     if (reg.test(data)) {
-      console.log("         [" + version + ":" + docsId + "]fund: " + pathname);
+      console.log("         ******[" + version + ":" + docsId + "]fund: " + pathname);
       return pathname;
     }
   }

[pulsar-site] 04/04: update

Posted by ur...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

urfree pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/pulsar-site.git

commit ebae8860fdb9db2eed31f41810cda9bf003e79f3
Author: LiLi <ur...@apache.org>
AuthorDate: Thu Feb 17 16:31:14 2022 +0800

    update
    
    Signed-off-by: LiLi <ur...@apache.org>
---
 site2/website-next/versions.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/site2/website-next/versions.json b/site2/website-next/versions.json
index dca5a36..412838d 100644
--- a/site2/website-next/versions.json
+++ b/site2/website-next/versions.json
@@ -1 +1 @@
-["2.3.2", "2.3.1", "2.3.0", "2.2.1", "2.2.0"]
\ No newline at end of file
+["2.9.1", "2.9.0"]
\ No newline at end of file