You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@pulsar.apache.org by li...@apache.org on 2021/09/01 03:51:58 UTC

[pulsar] branch master updated: [website][upgrade]feat: docs migration about Concepts and Architecture (versions: next/2.8.0/2.7.3) (#11851)

This is an automated email from the ASF dual-hosted git repository.

liuyu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/pulsar.git


The following commit(s) were added to refs/heads/master by this push:
     new ce53d07  [website][upgrade]feat: docs migration about Concepts and Architecture (versions: next/2.8.0/2.7.3) (#11851)
ce53d07 is described below

commit ce53d07c9ee8f0ebdfb64a54a3cf5edcede22bc0
Author: Li Li <ur...@gmail.com>
AuthorDate: Wed Sep 1 11:50:49 2021 +0800

    [website][upgrade]feat: docs migration about Concepts and Architecture (versions: next/2.8.0/2.7.3) (#11851)
    
    * feat: md assets
    
    Signed-off-by: LiLi <ur...@gmail.com>
    
    * feat: docs about Concepts and Architecture (version: next)
    
    Signed-off-by: LiLi <ur...@gmail.com>
    
    * feat: docs about Concepts and Architecture (version: 2.8.0)
    
    * fix: md link jumper fixed
    
    Signed-off-by: LiLi <ur...@gmail.com>
    
    * feat: feat: docs migration about Concepts and Architecture - 2.7.3
    
    Signed-off-by: LiLi <ur...@gmail.com>
---
 .../docs/concepts-architecture-overview.md         | 161 ++++++
 site2/website-next/docs/concepts-authentication.md |   8 +
 site2/website-next/docs/concepts-clients.md        |  84 +++
 site2/website-next/docs/concepts-messaging.md      | 608 +++++++++++++++++++++
 site2/website-next/docs/concepts-multi-tenancy.md  |  54 ++
 .../docs/concepts-multiple-advertised-listeners.md |  38 ++
 site2/website-next/docs/concepts-overview.md       |  30 +
 .../docs/concepts-proxy-sni-routing.md             | 150 +++++
 site2/website-next/docs/concepts-replication.md    |   8 +
 .../website-next/docs/concepts-schema-registry.md  |   5 +
 site2/website-next/docs/concepts-tiered-storage.md |  17 +
 .../website-next/docs/concepts-topic-compaction.md |  36 ++
 site2/website-next/docs/concepts-transactions.md   |  29 +
 site2/website-next/docusaurus.config.js            |   8 +
 site2/website-next/package.json                    |   1 +
 site2/website-next/sidebars.js                     |  16 +
 .../static/assets/binary-protocol-connect.png      | Bin 0 -> 10230 bytes
 .../static/assets/binary-protocol-consumer.png     | Bin 0 -> 32239 bytes
 .../static/assets/binary-protocol-producer.png     | Bin 0 -> 33100 bytes
 .../static/assets/binary-protocol-topic-lookup.png | Bin 0 -> 29050 bytes
 site2/website-next/static/assets/broker-bookie.png | Bin 0 -> 136204 bytes
 site2/website-next/static/assets/chunking-01.png   | Bin 0 -> 11881 bytes
 site2/website-next/static/assets/chunking-02.png   | Bin 0 -> 30135 bytes
 .../website-next/static/assets/dcos_bookie_log.png | Bin 0 -> 668472 bytes
 .../static/assets/dcos_bookkeeper_in_zookeeper.png | Bin 0 -> 264657 bytes
 .../static/assets/dcos_bookkeeper_run.png          | Bin 0 -> 524502 bytes
 .../static/assets/dcos_bookkeeper_status.png       | Bin 0 -> 115364 bytes
 .../static/assets/dcos_broker_in_zookeeper.png     | Bin 0 -> 402876 bytes
 .../website-next/static/assets/dcos_broker_log.png | Bin 0 -> 689765 bytes
 .../website-next/static/assets/dcos_broker_run.png | Bin 0 -> 200575 bytes
 .../static/assets/dcos_broker_status.png           | Bin 0 -> 144495 bytes
 .../static/assets/dcos_command_execute.png         | Bin 0 -> 129195 bytes
 .../static/assets/dcos_command_execute2.png        | Bin 0 -> 699589 bytes
 site2/website-next/static/assets/dcos_consumer.png | Bin 0 -> 1620672 bytes
 .../static/assets/dcos_grafana_dashboard.png       | Bin 0 -> 102136 bytes
 .../static/assets/dcos_grafana_endpoint.png        | Bin 0 -> 277004 bytes
 site2/website-next/static/assets/dcos_metrics.png  | Bin 0 -> 389670 bytes
 .../static/assets/dcos_monitor_status.png          | Bin 0 -> 136506 bytes
 site2/website-next/static/assets/dcos_producer.png | Bin 0 -> 1542880 bytes
 .../static/assets/dcos_prom_endpoint.png           | Bin 0 -> 224700 bytes
 .../static/assets/dcos_prom_targets.png            | Bin 0 -> 240218 bytes
 .../website-next/static/assets/dcos_uninstall.png  | Bin 0 -> 115389 bytes
 .../static/assets/functions-worker-corun-proxy.png | Bin 0 -> 12928 bytes
 .../static/assets/functions-worker-corun.png       | Bin 0 -> 11286 bytes
 .../assets/functions-worker-separated-proxy.png    | Bin 0 -> 24788 bytes
 .../static/assets/functions-worker-separated.png   | Bin 0 -> 21649 bytes
 .../website-next/static/assets/geo-replication.png | Bin 0 -> 86141 bytes
 .../static/assets/message-deduplication.png        | Bin 0 -> 116703 bytes
 site2/website-next/static/assets/message_delay.png | Bin 0 -> 30906 bytes
 site2/website-next/static/assets/partitioning.png  | Bin 0 -> 125576 bytes
 site2/website-next/static/assets/perf-produce.png  | Bin 0 -> 204162 bytes
 .../static/assets/pulsar-basic-setup.png           | Bin 0 -> 125695 bytes
 .../static/assets/pulsar-encryption-consumer.jpg   | Bin 0 -> 31221 bytes
 .../static/assets/pulsar-encryption-producer.jpg   | Bin 0 -> 30885 bytes
 .../assets/pulsar-exclusive-subscriptions.png      | Bin 0 -> 59440 bytes
 .../assets/pulsar-failover-subscriptions.png       | Bin 0 -> 64604 bytes
 .../static/assets/pulsar-functions-overview.png    | Bin 0 -> 77077 bytes
 .../assets/pulsar-functions-routing-example.png    | Bin 0 -> 62087 bytes
 .../static/assets/pulsar-functions-word-count.png  | Bin 0 -> 61459 bytes
 site2/website-next/static/assets/pulsar-io.png     | Bin 0 -> 37316 bytes
 .../assets/pulsar-key-shared-subscriptions.png     | Bin 0 -> 124310 bytes
 .../assets/pulsar-reader-consumer-interfaces.png   | Bin 0 -> 131839 bytes
 .../static/assets/pulsar-service-discovery.png     | Bin 0 -> 65898 bytes
 .../static/assets/pulsar-shared-subscriptions.png  | Bin 0 -> 68922 bytes
 .../static/assets/pulsar-sni-client.png            | Bin 0 -> 232801 bytes
 .../website-next/static/assets/pulsar-sni-geo.png  | Bin 0 -> 201549 bytes
 .../static/assets/pulsar-sql-arch-1.png            | Bin 0 -> 167744 bytes
 .../static/assets/pulsar-sql-arch-2.png            | Bin 0 -> 130432 bytes
 .../static/assets/pulsar-subscription-modes.png    | Bin 0 -> 220423 bytes
 .../static/assets/pulsar-system-architecture.png   | Bin 0 -> 309560 bytes
 .../static/assets/pulsar-tiered-storage.png        | Bin 0 -> 70532 bytes
 .../static/assets/retention-expiry.png             | Bin 0 -> 113741 bytes
 .../static/assets/schema-autoupdate-consumer.png   | Bin 0 -> 86897 bytes
 .../static/assets/schema-autoupdate-producer.png   | Bin 0 -> 116544 bytes
 .../website-next/static/assets/schema-consumer.png | Bin 0 -> 171003 bytes
 .../website-next/static/assets/schema-producer.png | Bin 0 -> 161680 bytes
 site2/website-next/static/assets/txn-1.png         | Bin 0 -> 146508 bytes
 site2/website-next/static/assets/txn-2.png         | Bin 0 -> 57117 bytes
 site2/website-next/static/assets/txn-3.png         | Bin 0 -> 249164 bytes
 site2/website-next/static/assets/txn-4.png         | Bin 0 -> 197983 bytes
 site2/website-next/static/assets/txn-5.png         | Bin 0 -> 202533 bytes
 site2/website-next/static/assets/txn-6.png         | Bin 0 -> 188856 bytes
 site2/website-next/static/assets/txn-7.png         | Bin 0 -> 216642 bytes
 site2/website-next/static/assets/txn-8.png         | Bin 0 -> 190633 bytes
 site2/website-next/static/assets/txn-9.png         | Bin 0 -> 469713 bytes
 .../concepts-architecture-overview.md              | 156 ++++++
 .../version-2.7.3/concepts-authentication.md       |   9 +
 .../version-2.7.3/concepts-clients.md              |  85 +++
 .../version-2.7.3/concepts-messaging.md            | 521 ++++++++++++++++++
 .../version-2.7.3/concepts-multi-tenancy.md        |  55 ++
 .../concepts-multiple-advertised-listeners.md      |  39 ++
 .../version-2.7.3/concepts-overview.md             |  31 ++
 .../version-2.7.3/concepts-proxy-sni-routing.md    | 121 ++++
 .../version-2.7.3/concepts-replication.md          |   9 +
 .../version-2.7.3/concepts-tiered-storage.md       |  18 +
 .../version-2.7.3/concepts-topic-compaction.md     |  37 ++
 .../version-2.7.3/concepts-transactions.md         |  30 +
 .../concepts-architecture-overview.md              | 162 ++++++
 .../version-2.8.0/concepts-authentication.md       |   9 +
 .../version-2.8.0/concepts-clients.md              |  85 +++
 .../version-2.8.0/concepts-messaging.md            | 555 +++++++++++++++++++
 .../version-2.8.0/concepts-multi-tenancy.md        |  55 ++
 .../concepts-multiple-advertised-listeners.md      |  39 ++
 .../version-2.8.0/concepts-overview.md             |  31 ++
 .../version-2.8.0/concepts-proxy-sni-routing.md    | 151 +++++
 .../version-2.8.0/concepts-replication.md          |   9 +
 .../version-2.8.0/concepts-tiered-storage.md       |  18 +
 .../version-2.8.0/concepts-topic-compaction.md     |  37 ++
 .../version-2.8.0/concepts-transactions.md         |  30 +
 .../versioned_sidebars/version-2.7.3-sidebars.json |  50 +-
 .../versioned_sidebars/version-2.8.0-sidebars.json |  48 ++
 111 files changed, 3642 insertions(+), 1 deletion(-)

diff --git a/site2/website-next/docs/concepts-architecture-overview.md b/site2/website-next/docs/concepts-architecture-overview.md
new file mode 100644
index 0000000..1638fcd
--- /dev/null
+++ b/site2/website-next/docs/concepts-architecture-overview.md
@@ -0,0 +1,161 @@
+---
+id: concepts-architecture-overview
+title: Architecture Overview
+sidebar_label: Architecture
+---
+
+At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication.md) data amongst themselves.
+
+In a Pulsar cluster:
+
+* One or more brokers handles and load balances incoming messages from producers, dispatches messages to consumers, communicates with the Pulsar configuration store to handle various coordination tasks, stores messages in BookKeeper instances (aka bookies), relies on a cluster-specific ZooKeeper cluster for certain tasks, and more.
+* A BookKeeper cluster consisting of one or more bookies handles [persistent storage](#persistent-storage) of messages.
+* A ZooKeeper cluster specific to that cluster handles coordination tasks between Pulsar clusters.
+
+The diagram below provides an illustration of a Pulsar cluster:
+
+![Pulsar architecture diagram](/assets/pulsar-system-architecture.png)
+
+At the broader instance level, an instance-wide ZooKeeper cluster called the configuration store handles coordination tasks involving multiple clusters, for example [geo-replication](concepts-replication.md).
+
+## Brokers
+
+The Pulsar message broker is a stateless component that's primarily responsible for running two other components:
+
+* An HTTP server that exposes a {@inject: rest:REST:/} API for both administrative tasks and [topic lookup](concepts-clients.md#client-setup-phase) for producers and consumers. The producers connect to the brokers to publish messages and the consumers connect to the brokers to consume the messages.
+* A dispatcher, which is an asynchronous TCP server over a custom [binary protocol](developing-binary-protocol.md) used for all data transfers
+
+Messages are typically dispatched out of a [managed ledger](#managed-ledgers) cache for the sake of performance, *unless* the backlog exceeds the cache size. If the backlog grows too large for the cache, the broker will start reading entries from BookKeeper.
+
+Finally, to support geo-replication on global topics, the broker manages replicators that tail the entries published in the local region and republish them to the remote region using the Pulsar [Java client library](client-libraries-java.md).
+
+> For a guide to managing Pulsar brokers, see the [brokers](admin-api-brokers.md) guide.
+
+## Clusters
+
+A Pulsar instance consists of one or more Pulsar *clusters*. Clusters, in turn, consist of:
+
+* One or more Pulsar [brokers](#brokers)
+* A ZooKeeper quorum used for cluster-level configuration and coordination
+* An ensemble of bookies used for [persistent storage](#persistent-storage) of messages
+
+Clusters can replicate amongst themselves using [geo-replication](concepts-replication.md).
+
+> For a guide to managing Pulsar clusters, see the [clusters](admin-api-clusters.md) guide.
+
+## Metadata store
+
+The Pulsar metadata store maintains all the metadata of a Pulsar cluster, such as topic metadata, schema, broker load data, and so on. Pulsar uses [Apache ZooKeeper](https://zookeeper.apache.org/) for metadata storage, cluster configuration, and coordination. The Pulsar metadata store can be deployed on a separate ZooKeeper cluster or deployed on an existing ZooKeeper cluster. You can use one ZooKeeper cluster for both Pulsar metadata store and [BookKeeper metadata store](https://bookkee [...]
+
+In a Pulsar instance:
+
+* A configuration store quorum stores configuration for tenants, namespaces, and other entities that need to be globally consistent.
+* Each cluster has its own local ZooKeeper ensemble that stores cluster-specific configuration and coordination such as which brokers are responsible for which topics as well as ownership metadata, broker load reports, BookKeeper ledger metadata, and more.
+
+## Configuration store
+
+The configuration store maintains all the configurations of a Pulsar instance, such as clusters, tenants, namespaces, partitioned topic related configurations, and so on. A Pulsar instance can have a single local cluster, multiple local clusters, or multiple cross-region clusters. Consequently, the configuration store can share the configurations across multiple clusters under a Pulsar instance. The configuration store can be deployed on a separate ZooKeeper cluster or deployed on an exi [...]
+
+## Persistent storage
+
+Pulsar provides guaranteed message delivery for applications. If a message successfully reaches a Pulsar broker, it will be delivered to its intended target.
+
+This guarantee requires that non-acknowledged messages are stored in a durable manner until they can be delivered to and acknowledged by consumers. This mode of messaging is commonly called *persistent messaging*. In Pulsar, N copies of all messages are stored and synced on disk, for example 4 copies across two servers with mirrored [RAID](https://en.wikipedia.org/wiki/RAID) volumes on each server.
+
+### Apache BookKeeper
+
+Pulsar uses a system called [Apache BookKeeper](http://bookkeeper.apache.org/) for persistent message storage. BookKeeper is a distributed [write-ahead log](https://en.wikipedia.org/wiki/Write-ahead_logging) (WAL) system that provides a number of crucial advantages for Pulsar:
+
+* It enables Pulsar to utilize many independent logs, called [ledgers](#ledgers). Multiple ledgers can be created for topics over time.
+* It offers very efficient storage for sequential data that handles entry replication.
+* It guarantees read consistency of ledgers in the presence of various system failures.
+* It offers even distribution of I/O across bookies.
+* It's horizontally scalable in both capacity and throughput. Capacity can be immediately increased by adding more bookies to a cluster.
+* Bookies are designed to handle thousands of ledgers with concurrent reads and writes. By using multiple disk devices---one for journal and another for general storage---bookies are able to isolate the effects of read operations from the latency of ongoing write operations.
+
+In addition to message data, *cursors* are also persistently stored in BookKeeper. Cursors are [subscription](reference-terminology.md#subscription) positions for [consumers](reference-terminology.md#consumer). BookKeeper enables Pulsar to store consumer position in a scalable fashion.
+
+At the moment, Pulsar supports persistent message storage. This accounts for the `persistent` in all topic names. Here's an example:
+
+```http
+persistent://my-tenant/my-namespace/my-topic
+```
+
+> Pulsar also supports ephemeral ([non-persistent](concepts-messaging.md#non-persistent-topics)) message storage.
+
+
+You can see an illustration of how brokers and bookies interact in the diagram below:
+
+![Brokers and bookies](/assets/broker-bookie.png)
+
+
+### Ledgers
+
+A ledger is an append-only data structure with a single writer that is assigned to multiple BookKeeper storage nodes, or bookies. Ledger entries are replicated to multiple bookies. Ledgers themselves have very simple semantics:
+
+* A Pulsar broker can create a ledger, append entries to the ledger, and close the ledger.
+* After the ledger has been closed---either explicitly or because the writer process crashed---it can then be opened only in read-only mode.
+* Finally, when entries in the ledger are no longer needed, the whole ledger can be deleted from the system (across all bookies).
+
+#### Ledger read consistency
+
+The main strength of BookKeeper is that it guarantees read consistency in ledgers in the presence of failures. Since the ledger can only be written to by a single process, that process is free to append entries very efficiently, without needing to obtain consensus. After a failure, the ledger will go through a recovery process that will finalize the state of the ledger and establish which entry was last committed to the log. After that point, all readers of the ledger are guaranteed to see  [...]
+
+#### Managed ledgers
+
+Given that Bookkeeper ledgers provide a single log abstraction, a library was developed on top of the ledger called the *managed ledger* that represents the storage layer for a single topic. A managed ledger represents the abstraction of a stream of messages with a single writer that keeps appending at the end of the stream and multiple cursors that are consuming the stream, each with its own associated position.
+
+Internally, a single managed ledger uses multiple BookKeeper ledgers to store the data. There are two reasons to have multiple ledgers:
+
+1. After a failure, a ledger is no longer writable and a new one needs to be created.
+2. A ledger can be deleted when all cursors have consumed the messages it contains. This allows for periodic rollover of ledgers.
+
+### Journal storage
+
+In BookKeeper, *journal* files contain BookKeeper transaction logs. Before making an update to a [ledger](#ledgers), a bookie needs to ensure that a transaction describing the update is written to persistent (non-volatile) storage. A new journal file is created once the bookie starts or the older journal file reaches the journal file size threshold (configured using the [`journalMaxSizeMB`](reference-configuration.md#bookkeeper-journalMaxSizeMB) parameter).
+
+## Pulsar proxy
+
+One way for Pulsar clients to interact with a Pulsar [cluster](#clusters) is by connecting to Pulsar message [brokers](#brokers) directly. In some cases, however, this kind of direct connection is either infeasible or undesirable because the client doesn't have direct access to broker addresses. If you're running Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, for example, then direct client connections to brokers are likely not possible.
+
+The **Pulsar proxy** provides a solution to this problem by acting as a single gateway for all of the brokers in a cluster. If you run the Pulsar proxy (which, again, is optional), all client connections with the Pulsar cluster will flow through the proxy rather than communicating with brokers.
+
+> For the sake of performance and fault tolerance, you can run as many instances of the Pulsar proxy as you'd like.
+
+Architecturally, the Pulsar proxy gets all the information it requires from ZooKeeper. When starting the proxy on a machine, you only need to provide ZooKeeper connection strings for the cluster-specific and instance-wide configuration store clusters. Here's an example:
+
+```bash
+$ bin/pulsar proxy \
+  --zookeeper-servers zk-0,zk-1,zk-2 \
+  --configuration-store-servers zk-0,zk-1,zk-2
+```
+
+> #### Pulsar proxy docs
+> For documentation on using the Pulsar proxy, see the [Pulsar proxy admin documentation](administration-proxy.md).
+
+
+Some important things to know about the Pulsar proxy:
+
+* Connecting clients don't need to provide *any* specific configuration to use the Pulsar proxy. You won't need to update the client configuration for existing applications beyond updating the IP used for the service URL (for example if you're running a load balancer over the Pulsar proxy).
+* [TLS encryption](security-tls-transport.md) and [authentication](security-tls-authentication.md) are supported by the Pulsar proxy
+
+## Service discovery
+
+[Clients](getting-started-clients.md) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+
+You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
+
+The diagram below illustrates Pulsar service discovery:
+
+![alt-text](/assets/pulsar-service-discovery.png)
+
+In this diagram, the Pulsar cluster is addressable via a single DNS name: `pulsar-cluster.acme.com`. A [Python client](client-libraries-python.md), for example, could access this Pulsar cluster like this:
+
+```python
+from pulsar import Client
+
+client = Client('pulsar://pulsar-cluster.acme.com:6650')
+```
+
+> **Note**
+> In Pulsar, each topic is handled by only one broker. Initial requests from a client to read, update or delete a topic are sent to a broker that may not be the topic owner. If the broker cannot handle the request for this topic, it redirects the request to the appropriate broker.
diff --git a/site2/website-next/docs/concepts-authentication.md b/site2/website-next/docs/concepts-authentication.md
new file mode 100644
index 0000000..ae1232d
--- /dev/null
+++ b/site2/website-next/docs/concepts-authentication.md
@@ -0,0 +1,8 @@
+---
+id: concepts-authentication
+title: Authentication and Authorization
+sidebar_label: Authentication and Authorization
+---
+
+Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization.md) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
+
diff --git a/site2/website-next/docs/concepts-clients.md b/site2/website-next/docs/concepts-clients.md
new file mode 100644
index 0000000..9fe6d46
--- /dev/null
+++ b/site2/website-next/docs/concepts-clients.md
@@ -0,0 +1,84 @@
+---
+id: concepts-clients
+title: Pulsar Clients
+sidebar_label: Clients
+---
+
+Pulsar exposes a client API with language bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md), [C++](client-libraries-cpp.md) and [C#](client-libraries-dotnet.md). The client API optimizes and encapsulates Pulsar's client-broker communication protocol and exposes a simple and intuitive API for use by applications.
+
+Under the hood, the current official Pulsar client libraries support transparent reconnection and/or connection failover to brokers, queuing of messages until acknowledged by the broker, and heuristics such as connection retries with backoff.
+
+> **Custom client libraries**
+> If you'd like to create your own client library, we recommend consulting the documentation on Pulsar's custom [binary protocol](developing-binary-protocol.md).
+
+
+## Client setup phase
+
+Before an application creates a producer/consumer, the Pulsar client library needs to initiate a setup phase including two steps:
+
+1. The client attempts to determine the owner of the topic by sending an HTTP lookup request to the broker. The request could reach one of the active brokers which, by looking at the (cached) zookeeper metadata knows who is serving the topic or, in case nobody is serving it, tries to assign it to the least loaded broker.
+1. Once the client library has the broker address, it creates a TCP connection (or reuses an existing connection from the pool) and authenticates it. Within this connection, client and broker exchange binary commands from a custom protocol. At this point the client sends a command to create producer/consumer to the broker, which will comply after having validated the authorization policy.
+
+Whenever the TCP connection breaks, the client immediately re-initiates this setup phase and keeps trying with exponential backoff to re-establish the producer or consumer until the operation succeeds.
+
+## Reader interface
+
+In Pulsar, the "standard" [consumer interface](concepts-messaging.md#consumers) involves using consumers to listen on [topics](reference-terminology.md#topic), process incoming messages, and finally acknowledge those messages when they are processed. Whenever a new subscription is created, it is initially positioned at the end of the topic (by default), and consumers associated with that subscription begin reading with the first message created afterwards.  Whenever a consumer connects t [...]
+
+The **reader interface** for Pulsar enables applications to manually manage cursors. When you use a reader to connect to a topic---rather than a consumer---you need to specify *which* message the reader begins reading from when it connects to a topic. When connecting to a topic, the reader interface enables you to begin with:
+
+* The **earliest** available message in the topic
+* The **latest** available message in the topic
+* Some other message between the earliest and the latest. If you select this option, you'll need to explicitly provide a message ID. Your application will be responsible for "knowing" this message ID in advance, perhaps fetching it from a persistent data store or cache.
+
+The reader interface is helpful for use cases like using Pulsar to provide effectively-once processing semantics for a stream processing system. For this use case, it's essential that the stream processing system be able to "rewind" topics to a specific message and begin reading there. The reader interface provides Pulsar clients with the low-level abstraction necessary to "manually position" themselves within a topic.
+
+Internally, the reader interface is implemented as a consumer using an exclusive, non-durable subscription to the topic with a randomly-allocated name.
+
+[ **IMPORTANT** ]
+
+Unlike subscription/consumer, readers are non-durable in nature and do not prevent data in a topic from being deleted, thus it is ***strongly*** advised that [data retention](cookbooks-retention-expiry.md) be configured. If data retention for a topic is not configured for an adequate amount of time, messages that the reader has not yet read might be deleted.  This causes the readers to essentially skip messages. Configuring the data retention for a topic guarantees the reader with a c [...]
+
+Please also note that a reader can have a "backlog", but the metric is only used for users to know how behind the reader is. The metric is not considered for any backlog quota calculations. 
+
+![The Pulsar consumer and reader interfaces](/assets/pulsar-reader-consumer-interfaces.png)
+
+Here's a Java example that begins reading from the earliest available message on a topic:
+
+```java
+import org.apache.pulsar.client.api.Message;
+import org.apache.pulsar.client.api.MessageId;
+import org.apache.pulsar.client.api.Reader;
+
+// Create a reader on a topic and for a specific message (and onward)
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic("reader-api-test")
+    .startMessageId(MessageId.earliest)
+    .create();
+
+while (true) {
+    Message message = reader.readNext();
+
+    // Process the message
+}
+```
+
+To create a reader that reads from the latest available message:
+
+```java
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic(topic)
+    .startMessageId(MessageId.latest)
+    .create();
+```
+
+To create a reader that reads from some message between the earliest and the latest:
+
+```java
+byte[] msgIdBytes = // Some byte array
+MessageId id = MessageId.fromByteArray(msgIdBytes);
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic(topic)
+    .startMessageId(id)
+    .create();
+```
diff --git a/site2/website-next/docs/concepts-messaging.md b/site2/website-next/docs/concepts-messaging.md
new file mode 100644
index 0000000..1da3297
--- /dev/null
+++ b/site2/website-next/docs/concepts-messaging.md
@@ -0,0 +1,608 @@
+---
+id: concepts-messaging
+title: Messaging
+sidebar_label: Messaging
+---
+
+Pulsar is built on the [publish-subscribe](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) pattern (often abbreviated to pub-sub). In this pattern, [producers](#producers) publish messages to [topics](#topics); [consumers](#consumers) [subscribe](#subscription-modes) to those topics, process incoming messages, and send [acknowledgements](#acknowledgement) to the broker when processing is finished.
+
+When a subscription is created, Pulsar [retains](concepts-architecture-overview.md#persistent-storage) all messages, even if the consumer is disconnected. The retained messages are discarded only when a consumer acknowledges that all these messages are processed successfully. 
+
+If the consumption of a message fails and you want this message to be consumed again, then you can enable the automatic redelivery of this message by sending a [negative acknowledgement](#negative-acknowledgement) to the broker or enabling the [acknowledgement timeout](#acknowledgement-timeout) for unacknowledged messages.
+
+## Messages
+
+Messages are the basic "unit" of Pulsar. The following table lists the components of messages.
+
+Component | Description
+:---------|:-------
+Value / data payload | The data carried by the message. All Pulsar messages contain raw bytes, although message data can also conform to data [schemas](schema-get-started.md).
+Key | Messages are optionally tagged with keys, which is useful for things like [topic compaction](concepts-topic-compaction.md).
+Properties | An optional key/value map of user-defined properties.
+Producer name | The name of the producer who produces the message. If you do not specify a producer name, the default name is used. 
+Sequence ID | Each Pulsar message belongs to an ordered sequence on its topic. The sequence ID of the message is its order in that sequence.
+Publish time | The timestamp of when the message is published. The timestamp is automatically applied by the producer.
+Event time | An optional timestamp attached to a message by applications. For example, applications attach a timestamp on when the message is processed. If nothing is set to event time, the value is `0`. 
+TypedMessageBuilder | It is used to construct a message. You can set message properties such as the message key, message value with `TypedMessageBuilder`. <br /> When you set `TypedMessageBuilder`, set the key as a string. If you set the key as other types, for example, an AVRO object, the key is sent as bytes, and it is difficult to get the AVRO object back on the consumer.
+
+The default size of a message is 5 MB. You can configure the max size of a message with the following configurations.
+
+- In the `broker.conf` file.
+
+    ```bash
+    # The max size of a message (in bytes).
+    maxMessageSize=5242880
+    ```
+
+- In the `bookkeeper.conf` file.
+
+    ```bash
+    # The max size of the netty frame (in bytes). Any messages received larger than this value are rejected. The default value is 5 MB.
+    nettyMaxFrameSizeBytes=5253120
+    ```
+> For more information on Pulsar messages, see Pulsar [binary protocol](developing-binary-protocol.md).
+
+## Producers
+
+A producer is a process that attaches to a topic and publishes messages to a Pulsar [broker](reference-terminology.md#broker). The Pulsar broker processes the messages.
+
+### Send modes
+
+Producers send messages to brokers synchronously (sync) or asynchronously (async).
+
+| Mode       | Description |
+|:-----------|-----------|
+| Sync send  | The producer waits for an acknowledgement from the broker after sending every message. If the acknowledgment is not received, the producer treats the sending operation as a failure.                                                                                                                                                                                    |
+| Async send | The producer puts a message in a blocking queue and returns immediately. The client library sends the message to the broker in the background. If the queue is full (you can [configure](reference-configuration.md#broker) the maximum size), the producer is blocked or fails immediately when calling the API, depending on arguments passed to the producer. |
+
+### Access mode
+
+You can have different types of access modes on topics for producers.
+
+|Access mode | Description
+|---|---
+`Shared`|Multiple producers can publish on a topic. <br /><br />This is the **default** setting.
+`Exclusive`|Only one producer can publish on a topic. <br /><br />If there is already a producer connected, other producers trying to publish on this topic get errors immediately.<br /><br />The “old” producer is evicted and a “new” producer is selected to be the next exclusive producer if the “old” producer experiences a network partition with the broker.
+`WaitForExclusive`|If there is already a producer connected, the producer creation is pending (rather than timing out) until the producer gets the `Exclusive` access.<br /><br />The producer that succeeds in becoming the exclusive one is treated as the leader. Consequently, if you want to implement the leader election scheme for your application, you can use this access mode.
+
+> **Note**
+>
+> Once an application creates a producer with `Exclusive` or `WaitForExclusive` access mode successfully, the instance of this application is guaranteed to be the **only writer** to the topic. Any other producers trying to produce messages on this topic will either get errors immediately or have to wait until they get the `Exclusive` access. 
+> 
+> For more information, see [PIP 68: Exclusive Producer](https://github.com/apache/pulsar/wiki/PIP-68:-Exclusive-Producer).
+
+You can set producer access mode through Java Client API. For more information, see `ProducerAccessMode` in [ProducerBuilder.java](https://github.com/apache/pulsar/blob/fc5768ca3bbf92815d142fe30e6bfad70a1b4fc6/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ProducerBuilder.java) file.
+
+
+### Compression
+
+You can compress messages published by producers during transportation. Pulsar currently supports the following types of compression:
+
+* [LZ4](https://github.com/lz4/lz4)
+* [ZLIB](https://zlib.net/)
+* [ZSTD](https://facebook.github.io/zstd/)
+* [SNAPPY](https://google.github.io/snappy/)
+
+### Batching
+
+When batching is enabled, the producer accumulates and sends a batch of messages in a single request. The batch size is defined by the maximum number of messages and the maximum publish latency. Therefore, the backlog size represents the total number of batches instead of the total number of messages.
+
+In Pulsar, batches are tracked and stored as single units rather than as individual messages. A consumer unbundles a batch into individual messages. However, scheduled messages (configured through the `deliverAt` or the `deliverAfter` parameter) are always sent as individual messages even if batching is enabled.
+
+In general, a batch is acknowledged when all of its messages are acknowledged by a consumer. It means that when **not all** batch messages are acknowledged, then unexpected failures, negative acknowledgements, or acknowledgement timeouts can result in a redelivery of all messages in this batch.
+
+To avoid redelivering acknowledged messages in a batch to the consumer, Pulsar introduces batch index acknowledgement since Pulsar 2.6.0. When batch index acknowledgement is enabled, the consumer filters out the batch index that has been acknowledged and sends the batch index acknowledgement request to the broker. The broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer [...]
+
+By default, batch index acknowledgement is disabled (`acknowledgmentAtBatchIndexLevelEnabled=false`). You can enable batch index acknowledgement by setting the `acknowledgmentAtBatchIndexLevelEnabled` parameter to `true` at the broker side. Enabling batch index acknowledgement results in more memory overheads. 
+
+### Chunking
+Before you enable chunking, read the following instructions.
+- Batching and chunking cannot be enabled simultaneously. To enable chunking, you must disable batching in advance.
+- Chunking is only supported for persisted topics.
+- Chunking is only supported for the exclusive and failover subscription modes.
+
+When chunking is enabled (`chunkingEnabled=true`), if the message size is greater than the allowed maximum publish-payload size, the producer splits the original message into chunked messages and publishes them with chunked metadata to the broker separately and in order. At the broker side, the chunked messages are stored in the managed-ledger in the same way as that of ordinary messages. The only difference is that the consumer needs to buffer the chunked messages and combines them into [...]
+
+The consumer consumes the chunked messages and buffers them until the consumer receives all the chunks of a message. And then the consumer stitches chunked messages together and places them into the receiver-queue. Clients consume messages from the receiver-queue. Once the consumer consumes the entire large message and acknowledges it, the consumer internally sends acknowledgement of all the chunk messages associated to that large message. You can set the `maxPendingChunkedMessage` param [...]
+
+The broker does not require any changes to support chunking for non-shared subscription. The broker only uses `chunkedMessageRate` to record chunked message rate on the topic.
+
+#### Handle chunked messages with one producer and one ordered consumer
+
+As shown in the following figure, when a topic has one producer which publishes large message payload in chunked messages along with regular non-chunked messages. The producer publishes message M1 in three chunks M1-C1, M1-C2 and M1-C3. The broker stores all the three chunked messages in the managed-ledger and dispatches to the ordered (exclusive/failover) consumer in the same order. The consumer buffers all the chunked messages in memory until it receives all the chunked messages, combi [...]
+
+![](/assets/chunking-01.png)
+
+#### Handle chunked messages with multiple producers and one ordered consumer
+
+When multiple publishers publish chunked messages into a single topic, the broker stores all the chunked messages coming from different publishers in the same managed-ledger. As shown below, Producer 1 publishes message M1 in three chunks M1-C1, M1-C2 and M1-C3. Producer 2 publishes message M2 in three chunks M2-C1, M2-C2 and M2-C3. All chunked messages of the specific message are still in order but might not be consecutive in the managed-ledger. This brings some memory pressure to the c [...]
+
+![](/assets/chunking-02.png)
+
+## Consumers
+
+A consumer is a process that attaches to a topic via a subscription and then receives messages.
+
+A consumer sends a [flow permit request](developing-binary-protocol.md#flow-control) to a broker to get messages. There is a queue at the consumer side to receive messages pushed from the broker. You can configure the queue size with the [`receiverQueueSize`](client-libraries-java.md#configure-consumer) parameter. The default size is `1000`. Each time `consumer.receive()` is called, a message is dequeued from the buffer.
+
+### Receive modes
+
+Messages are received from [brokers](reference-terminology.md#broker) either synchronously (sync) or asynchronously (async).
+
+| Mode          | Description                                                                                                                                                                                                   |
+|:--------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Sync receive  | A sync receive is blocked until a message is available.                                                                                                                                                  |
+| Async receive | An async receive returns immediately with a future value—for example, a [`CompletableFuture`](http://www.baeldung.com/java-completablefuture) in Java—that completes once a new message is available. |
+
+### Listeners
+
+Client libraries provide listener implementation for consumers. For example, the [Java client](client-libraries-java.md) provides a {@inject: javadoc:MessageListener:/client/org/apache/pulsar/client/api/MessageListener} interface. In this interface, the `received` method is called whenever a new message is received.
+
+### Acknowledgement
+
+The consumer sends an acknowledgement request to the broker after it consumes a message successfully. Then, this consumed message will be permanently stored, and be deleted only after all the subscriptions have acknowledged it. If you want to store the messages that have been acknowledged by a consumer, you need to configure the [message retention policy](concepts-messaging.md#message-retention-and-expiry).
+
+For batch messages, you can enable batch index acknowledgement to avoid dispatching acknowledged messages to the consumer. For details about batch index acknowledgement, see [batching](#batching).
+
+Messages can be acknowledged in one of the following two ways:
+
+- Being acknowledged individually. With individual acknowledgement, the consumer acknowledges each message and sends an acknowledgement request to the broker.
+- Being acknowledged cumulatively. With cumulative acknowledgement, the consumer **only** acknowledges the last message it received. All messages in the stream up to (and including) the provided message are not redelivered to that consumer.
+
+If you want to acknowledge messages individually, you can use the following API.
+
+```java
+consumer.acknowledge(msg);
+```
+If you want to acknowledge messages cumulatively, you can use the following API.
+```java
+consumer.acknowledgeCumulative(msg);
+```
+
+
+> **Note**  
+> Cumulative acknowledgement cannot be used in the [shared subscription mode](#subscription-modes), because the shared subscription mode involves multiple consumers who have access to the same subscription. In the shared subscription mode, messages are acknowledged individually.
+
+### Negative acknowledgement
+
+When a consumer fails to consume a message and intends to consume it again, this consumer should send a negative acknowledgement to the broker. Then, the broker will redeliver this message to the consumer.
+
+Messages are negatively acknowledged individually or cumulatively, depending on the consumption subscription mode.
+
+In the exclusive and failover subscription modes, consumers only negatively acknowledge the last message they receive.
+
+In the shared and Key_Shared subscription modes, consumers can negatively acknowledge messages individually.
+
+Be aware that negative acknowledgments on ordered subscription types, such as Exclusive, Failover and Key_Shared, might cause failed messages to be sent to consumers out of the original order.
+
+If you want to acknowledge messages negatively, you can use the following API.
+
+```java
+//With calling this api, messages are negatively acknowledged 
+consumer.negativeAcknowledge(msg);
+```
+
+> **Note**  
+> If batching is enabled, all messages in one batch are redelivered to the consumer.
+
+### Acknowledgement timeout
+
+If a message is not consumed successfully, and you want the broker to redeliver this message automatically, then you can enable the automatic redelivery mechanism for unacknowledged messages. With automatic redelivery enabled, the client tracks the unacknowledged messages within the entire `acktimeout` time range, and sends a `redeliver unacknowledged messages` request to the broker automatically when the acknowledgement timeout is specified.
+
+> **Note**  
+> - If batching is enabled, all messages in one batch are redelivered to the consumer.  
+> - The negative acknowledgement is preferable over the acknowledgement timeout, since negative acknowledgement controls the redelivery of individual messages more precisely and avoids invalid redeliveries when the message processing time exceeds the acknowledgement timeout.
+
+### Dead letter topic
+
+Dead letter topic enables you to consume new messages when some messages cannot be consumed successfully by a consumer. In this mechanism, messages that fail to be consumed are stored in a separate topic, which is called dead letter topic. You can decide how to handle messages in the dead letter topic.
+
+The following example shows how to enable dead letter topic in a Java client using the default dead letter topic:
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+              .topic(topic)
+              .subscriptionName("my-subscription")
+              .subscriptionType(SubscriptionType.Shared)
+              .deadLetterPolicy(DeadLetterPolicy.builder()
+                    .maxRedeliverCount(maxRedeliveryCount)
+                    .build())
+              .subscribe();
+                
+```
+The default dead letter topic uses this format: 
+```
+<topicname>-<subscriptionname>-DLQ
+```
+
+If you want to specify the name of the dead letter topic, use this Java client example:
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+              .topic(topic)
+              .subscriptionName("my-subscription")
+              .subscriptionType(SubscriptionType.Shared)
+              .deadLetterPolicy(DeadLetterPolicy.builder()
+                    .maxRedeliverCount(maxRedeliveryCount)
+                    .deadLetterTopic("your-topic-name")
+                    .build())
+              .subscribe();
+                
+```
+
+Dead letter topic depends on message redelivery. Messages are redelivered either due to [acknowledgement timeout](#acknowledgement-timeout) or [negative acknowledgement](#negative-acknowledgement). If you are going to use negative acknowledgement on a message, make sure it is negatively acknowledged before the acknowledgement timeout. 
+
+> **Note**    
+> Currently, dead letter topic is enabled in the Shared and Key_Shared subscription modes.
+
+### Retry letter topic
+
+For many online business systems, a message is re-consumed due to exception occurs in the business logic processing. To configure the delay time for re-consuming the failed messages, you can configure the producer to send messages to both the business topic and the retry letter topic, and enable automatic retry on the consumer. When automatic retry is enabled on the consumer, a message is stored in the retry letter topic if the messages are not consumed, and therefore the consumer automa [...]
+
+By default, automatic retry is disabled. You can set `enableRetry` to `true` to enable automatic retry on the consumer.
+
+This example shows how to consume messages from a retry letter topic.
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+                .topic(topic)
+                .subscriptionName("my-subscription")
+                .subscriptionType(SubscriptionType.Shared)
+                .enableRetry(true)
+                .receiverQueueSize(100)
+                .deadLetterPolicy(DeadLetterPolicy.builder()
+                        .maxRedeliverCount(maxRedeliveryCount)
+                        .retryLetterTopic("persistent://my-property/my-ns/my-subscription-custom-Retry")
+                        .build())
+                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
+                .subscribe();
+```
+
+If you want to put messages into a retry queue, you can use the following API.
+
+```java
+consumer.reconsumeLater(msg,3,TimeUnit.SECONDS);
+```
+
+
+
+## Topics
+
+As in other pub-sub systems, topics in Pulsar are named channels for transmitting messages from producers to consumers. Topic names are URLs that have a well-defined structure:
+
+```http
+{persistent|non-persistent}://tenant/namespace/topic
+```
+
+Topic name component | Description
+:--------------------|:-----------
+`persistent` / `non-persistent` | This identifies the type of topic. Pulsar supports two kind of topics: [persistent](concepts-architecture-overview.md#persistent-storage) and [non-persistent](#non-persistent-topics). The default is persistent, so if you do not specify a type, the topic is persistent. With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topi [...]
+`tenant`             | The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+`namespace`          | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespaces) level. Each tenant has one or multiple namespaces.
+`topic`              | The final part of the name. Topic names have no special meaning in a Pulsar instance.
+
+> **No need to explicitly create new topics**  
+> You do not need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar creates that topic under the namespace provided in the [topic name](#topics) automatically.
+> If no tenant or namespace is specified when a client creates a topic, the topic is created in the default tenant and namespace. You can also create a topic in a specified tenant and namespace, such as `persistent://my-tenant/my-namespace/my-topic`. `persistent://my-tenant/my-namespace/my-topic` means the `my-topic` topic is created in the `my-namespace` namespace of the `my-tenant` tenant.
+
+## Namespaces
+
+A namespace is a logical nomenclature within a tenant. A tenant creates multiple namespaces via the [admin API](admin-api-namespaces.md#create). For instance, a tenant with different applications can create a separate namespace for each application. A namespace allows the application to create and manage a hierarchy of topics. The topic `my-tenant/app1` is a namespace for the application `app1` for `my-tenant`. You can create any number of [topics](#topics) under the namespace.
+
+## Subscriptions
+
+A subscription is a named configuration rule that determines how messages are delivered to consumers. Four subscription modes are available in Pulsar: [exclusive](#exclusive), [shared](#shared), [failover](#failover), and [key_shared](#key_shared). These modes are illustrated in the figure below.
+
+![Subscription modes](/assets/pulsar-subscription-modes.png)
+
+> **Pub-Sub or Queuing**  
+> In Pulsar, you can use different subscriptions flexibly.
+> * If you want to achieve traditional "fan-out pub-sub messaging" among consumers, specify a unique subscription name for each consumer. It is exclusive subscription mode.
+> * If you want to achieve "message queuing" among consumers, share the same subscription name among multiple consumers(shared, failover, key_shared).
+> * If you want to achieve both effects simultaneously, combine exclusive subscription mode with other subscription modes for consumers.
+
+### Consumerless Subscriptions and Their Corresponding Modes
+When a subscription has no consumers, its subscription mode is undefined. A subscription's mode is defined when a consumer connects to the subscription, and the mode can be changed by restarting all consumers with a different configuration.
+
+### Exclusive
+
+In *exclusive* mode, only a single consumer is allowed to attach to the subscription. If multiple consumers subscribe to a topic using the same subscription, an error occurs.
+
+In the diagram below, only **Consumer A-0** is allowed to consume messages.
+
+> Exclusive mode is the default subscription mode.
+
+![Exclusive subscriptions](/assets/pulsar-exclusive-subscriptions.png)
+
+### Failover
+
+In *failover* mode, multiple consumers can attach to the same subscription. A master consumer is picked for a non-partitioned topic or for each partition of a partitioned topic and receives messages. When the master consumer disconnects, all (non-acknowledged and subsequent) messages are delivered to the next consumer in line.
+
+For partitioned topics, the broker sorts consumers by priority level and lexicographical order of consumer name. Then the broker tries to evenly assign partitions to the consumers with the highest priority level.
+
+For a non-partitioned topic, the broker picks consumers in the order in which they subscribe to the non-partitioned topic.
+
+In the diagram below, **Consumer-B-0** is the master consumer while **Consumer-B-1** would be the next consumer in line to receive messages if **Consumer-B-0** is disconnected.
+
+![Failover subscriptions](/assets/pulsar-failover-subscriptions.png)
+
+### Shared
+
+In *shared* or *round robin* mode, multiple consumers can attach to the same subscription. Messages are delivered in a round robin distribution across consumers, and any given message is delivered to only one consumer. When a consumer disconnects, all the messages that were sent to it and not acknowledged will be rescheduled for sending to the remaining consumers.
+
+In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscribe to the topic, but **Consumer-C-3** and others could as well.
+
+> **Limitations of shared mode**  
+> When using shared mode, be aware that:
+> * Message ordering is not guaranteed.
+> * You cannot use cumulative acknowledgment with shared mode.
+
+![Shared subscriptions](/assets/pulsar-shared-subscriptions.png)
+
+### Key_Shared
+
+In *Key_Shared* mode, multiple consumers can attach to the same subscription. Messages are delivered in a distribution across consumers, and messages with the same key or same ordering key are delivered to only one consumer. No matter how many times a message is re-delivered, it is delivered to the same consumer. When a consumer connects or disconnects, the consumer that serves some keys of messages changes.
+
+![Key_Shared subscriptions](/assets/pulsar-key-shared-subscriptions.png)
+
+Note that when the consumers are using the Key_Shared subscription mode, you need to **disable batching** or **use key-based batching** for the producers. There are two reasons why the key-based batching is necessary for Key_Shared subscription mode:
+1. The broker dispatches messages according to the keys of the messages, but the default batching approach might fail to pack the messages with the same key to the same batch. 
+2. Since it is the consumers instead of the broker who dispatch the messages from the batches, the key of the first message in one batch is considered as the key of all messages in this batch, thereby leading to context errors. 
+
+The key-based batching aims at resolving the above-mentioned issues. This batching method ensures that the producers pack the messages with the same key to the same batch. The messages without a key are packed into one batch and this batch has no key. When the broker dispatches messages from this batch, it uses `NON_KEY` as the key. In addition, each consumer is associated with **only one** key and should receive **only one message batch** for the connected key. By default, you can limit [...]
+
+Below are examples of enabling the key-based batching under the Key_Shared subscription mode, with `client` being the Pulsar client that you created.
+
+<!--DOCUSAURUS_CODE_TABS-->
+<!--Java-->
+```
+Producer<byte[]> producer = client.newProducer()
+        .topic("my-topic")
+        .batcherBuilder(BatcherBuilder.KEY_BASED)
+        .create();
+```
+
+<!--C++-->
+```
+ProducerConfiguration producerConfig;
+producerConfig.setBatchingType(ProducerConfiguration::BatchingType::KeyBasedBatching);
+Producer producer;
+client.createProducer("my-topic", producerConfig, producer);
+```
+
+<!--Python-->
+```
+producer = client.create_producer(topic='my-topic', batching_type=pulsar.BatchingType.KeyBased)
+```
+<!--END_DOCUSAURUS_CODE_TABS-->
+
+> **Limitations of Key_Shared mode**  
+> When you use Key_Shared mode, be aware that:
+> * You need to specify a key or orderingKey for messages.
+> * You cannot use cumulative acknowledgment with Key_Shared mode.
+
+## Multi-topic subscriptions
+
+When a consumer subscribes to a Pulsar topic, by default it subscribes to one specific topic, such as `persistent://public/default/my-topic`. As of Pulsar version 1.23.0-incubating, however, Pulsar consumers can simultaneously subscribe to multiple topics. You can define a list of topics in two ways:
+
+* On the basis of a [**reg**ular **ex**pression](https://en.wikipedia.org/wiki/Regular_expression) (regex), for example `persistent://public/default/finance-.*`
+* By explicitly defining a list of topics
+
+> When subscribing to multiple topics by regex, all topics must be in the same [namespace](#namespaces).
+
+When subscribing to multiple topics, the Pulsar client automatically makes a call to the Pulsar API to discover the topics that match the regex pattern/list, and then subscribe to all of them. If any of the topics do not exist, the consumer auto-subscribes to them once the topics are created.
+
+> **No ordering guarantees across multiple topics**  
+> When a producer sends messages to a single topic, all messages are guaranteed to be read from that topic in the same order. However, these guarantees do not hold across multiple topics. So when a producer sends message to multiple topics, the order in which messages are read from those topics is not guaranteed to be the same.
+
+The following are multi-topic subscription examples for Java.
+
+```java
+import java.util.regex.Pattern;
+
+import org.apache.pulsar.client.api.Consumer;
+import org.apache.pulsar.client.api.PulsarClient;
+
+PulsarClient pulsarClient = // Instantiate Pulsar client object
+
+// Subscribe to all topics in a namespace
+Pattern allTopicsInNamespace = Pattern.compile("persistent://public/default/.*");
+Consumer<byte[]> allTopicsConsumer = pulsarClient.newConsumer()
+                .topicsPattern(allTopicsInNamespace)
+                .subscriptionName("subscription-1")
+                .subscribe();
+
+// Subscribe to a subsets of topics in a namespace, based on regex
+Pattern someTopicsInNamespace = Pattern.compile("persistent://public/default/foo.*");
+Consumer<byte[]> someTopicsConsumer = pulsarClient.newConsumer()
+                .topicsPattern(someTopicsInNamespace)
+                .subscriptionName("subscription-1")
+                .subscribe();
+```
+
+For code examples, see [Java](client-libraries-java.md#multi-topic-subscriptions).
+
+## Partitioned topics
+
+Normal topics are served only by a single broker, which limits the maximum throughput of the topic. *Partitioned topics* are a special type of topic that are handled by multiple brokers, thus allowing for higher throughput.
+
+A partitioned topic is actually implemented as N internal topics, where N is the number of partitions. When publishing messages to a partitioned topic, each message is routed to one of several brokers. The distribution of partitions across brokers is handled automatically by Pulsar.
+
+The diagram below illustrates this:
+
+![](/assets/partitioning.png)
+
+The **Topic1** topic has five partitions (**P0** through **P4**) split across three brokers. Because there are more partitions than brokers, two brokers handle two partitions a piece, while the third handles only one (again, Pulsar handles this distribution of partitions automatically).
+
+Messages for this topic are broadcast to two consumers. The [routing mode](#routing-modes) determines to which partition each message is published, while the [subscription mode](#subscription-modes) determines which messages go to which consumers.
+
+Decisions about routing and subscription modes can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
+
+There is no difference between partitioned topics and normal topics in terms of how subscription modes work, as partitioning only determines what happens between when a message is published by a producer and processed and acknowledged by a consumer.
+
+Partitioned topics need to be explicitly created via the [admin API](admin-api-overview.md). The number of partitions can be specified when creating the topic.
+
+### Routing modes
+
+When publishing to partitioned topics, you must specify a *routing mode*. The routing mode determines which partition---that is, which internal topic---each message should be published to.
+
+There are three {@inject: javadoc:MessageRoutingMode:/client/org/apache/pulsar/client/api/MessageRoutingMode} available:
+
+Mode     | Description 
+:--------|:------------
+`RoundRobinPartition` | If no key is provided, the producer will publish messages across all partitions in round-robin fashion to achieve maximum throughput. Please note that round-robin is not done per individual message but rather it's set to the same boundary of batching delay, to ensure batching is effective. While if a key is specified on the message, the partitioned producer will hash the key and assign message to a particular partition. This is the default mode. 
+`SinglePartition`     | If no key is provided, the producer will randomly pick one single partition and publish all the messages into that partition. While if a key is specified on the message, the partitioned producer will hash the key and assign message to a particular partition.
+`CustomPartition`     | Use custom message router implementation that will be called to determine the partition for a particular message. User can create a custom routing mode by using the [Java client](client-libraries-java.md) and implementing the {@inject: javadoc:MessageRouter:/client/org/apache/pulsar/client/api/MessageRouter} interface.
+
+### Ordering guarantee
+
+The ordering of messages is related to MessageRoutingMode and Message Key. Usually, a user would want a per-key-partition ordering guarantee.
+
+If there is a key attached to message, the messages will be routed to corresponding partitions based on the hashing scheme specified by {@inject: javadoc:HashingScheme:/client/org/apache/pulsar/client/api/HashingScheme} in {@inject: javadoc:ProducerBuilder:/client/org/apache/pulsar/client/api/ProducerBuilder}, when using either `SinglePartition` or `RoundRobinPartition` mode.
+
+Ordering guarantee | Description | Routing Mode and Key
+:------------------|:------------|:------------
+Per-key-partition  | All the messages with the same key will be in order and be placed in same partition. | Use either `SinglePartition` or `RoundRobinPartition` mode, and Key is provided by each message.
+Per-producer       | All the messages from the same producer will be in order. | Use `SinglePartition` mode, and no Key is provided for each message.
+
+### Hashing scheme
+
+{@inject: javadoc:HashingScheme:/client/org/apache/pulsar/client/api/HashingScheme} is an enum that represents the set of standard hashing functions available when choosing the partition to use for a particular message.
+
+There are 2 types of standard hashing functions available: `JavaStringHash` and `Murmur3_32Hash`. 
+The default hashing function for producer is `JavaStringHash`.
+Note that `JavaStringHash` is not useful when producers are implemented in multiple language clients; in this use case, it is recommended to use `Murmur3_32Hash`.
+
+
+
+## Non-persistent topics
+
+
+By default, Pulsar persistently stores *all* unacknowledged messages on multiple [BookKeeper](concepts-architecture-overview.md#persistent-storage) bookies (storage nodes). Data for messages on persistent topics can thus survive broker restarts and subscriber failover.
+
+Pulsar also, however, supports **non-persistent topics**, which are topics on which messages are *never* persisted to disk and live only in memory. When using non-persistent delivery, killing a Pulsar broker or disconnecting a subscriber to a topic means that all in-transit messages are lost on that (non-persistent) topic, meaning that clients may see message loss.
+
+Non-persistent topics have names of this form (note the `non-persistent` in the name):
+
+```http
+non-persistent://tenant/namespace/topic
+```
+
+> For more info on using non-persistent topics, see the [Non-persistent messaging cookbook](cookbooks-non-persistent.md).
+
+In non-persistent topics, brokers immediately deliver messages to all connected subscribers *without persisting them* in [BookKeeper](concepts-architecture-overview.md#persistent-storage). If a subscriber is disconnected, the broker will not be able to deliver those in-transit messages, and subscribers will never be able to receive those messages again. Eliminating the persistent storage step makes messaging on non-persistent topics slightly faster than on persistent topics in some cases [...]
+
+> With non-persistent topics, message data lives only in memory. If a message broker fails or message data can otherwise not be retrieved from memory, your message data may be lost. Use non-persistent topics only if you're *certain* that your use case requires it and can sustain it.
+
+By default, non-persistent topics are enabled on Pulsar brokers. You can disable them in the broker's [configuration](reference-configuration.md#broker-enableNonPersistentTopics). You can manage non-persistent topics using the `pulsar-admin topics` command. For more information, see [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/).
+
+### Performance
+
+Non-persistent messaging is usually faster than persistent messaging because brokers don't persist messages and immediately send acks back to the producer as soon as that message is delivered to connected brokers. Producers thus see comparatively low publish latency with non-persistent topics.
+
+### Client API
+
+Producers and consumers can connect to non-persistent topics in the same way as persistent topics, with the crucial difference that the topic name must start with `non-persistent`. All three subscription modes---[exclusive](#exclusive), [shared](#shared), and [failover](#failover)---are supported for non-persistent topics.
+
+Here's an example [Java consumer](client-libraries-java.md#consumers) for a non-persistent topic:
+
+```java
+PulsarClient client = PulsarClient.builder()
+        .serviceUrl("pulsar://localhost:6650")
+        .build();
+String npTopic = "non-persistent://public/default/my-topic";
+String subscriptionName = "my-subscription-name";
+
+Consumer<byte[]> consumer = client.newConsumer()
+        .topic(npTopic)
+        .subscriptionName(subscriptionName)
+        .subscribe();
+```
+
+Here's an example [Java producer](client-libraries-java.md#producer) for the same non-persistent topic:
+
+```java
+Producer<byte[]> producer = client.newProducer()
+                .topic(npTopic)
+                .create();
+```
+
+## Message retention and expiry
+
+By default, Pulsar message brokers:
+
+* immediately delete *all* messages that have been acknowledged by a consumer, and
+* [persistently store](concepts-architecture-overview.md#persistent-storage) all unacknowledged messages in a message backlog.
+
+Pulsar has two features, however, that enable you to override this default behavior:
+
+* Message **retention** enables you to store messages that have been acknowledged by a consumer
+* Message **expiry** enables you to set a time to live (TTL) for messages that have not yet been acknowledged
+
+> All message retention and expiry is managed at the [namespace](#namespaces) level. For a how-to, see the [Message retention and expiry](cookbooks-retention-expiry.md) cookbook.
+
+The diagram below illustrates both concepts:
+
+![Message retention and expiry](/assets/retention-expiry.png)
+
+With message retention, shown at the top, a <span style={{color: "#89b557"}}>retention policy</span> applied to all topics in a namespace dictates that some messages are durably stored in Pulsar even though they've already been acknowledged. Acknowledged messages that are not covered by the retention policy are <span style={{color: "#bb3b3e"}}>deleted</span>. Without a retention policy, *all* of the <span style={{color: "#19967d"}}>acknowledged messages</span> would be deleted.
+
+With message expiry, shown at the bottom, some messages are <span style={{color: "#bb3b3e"}}>deleted</span>, even though they <span style={{color: "#337db6"}}>haven't been acknowledged</span>, because they've expired according to the <span style={{color: "#e39441"}}>TTL applied to the namespace</span> (for example because a TTL of 5 minutes has been applied and the messages haven't been acknowledged but are 10 minutes old).
+
+## Message deduplication
+
+Message duplication occurs when a message is [persisted](concepts-architecture-overview.md#persistent-storage) by Pulsar more than once. Message deduplication is an optional Pulsar feature that prevents unnecessary message duplication by processing each message only once, even if the message is received more than once.
+
+The following diagram illustrates what happens when message deduplication is disabled vs. enabled:
+
+![Pulsar message deduplication](/assets/message-deduplication.png)
+
+
+Message deduplication is disabled in the scenario shown at the top. Here, a producer publishes message 1 on a topic; the message reaches a Pulsar broker and is [persisted](concepts-architecture-overview.md#persistent-storage) to BookKeeper. The producer then sends message 1 again (in this case due to some retry logic), and the message is received by the broker and stored in BookKeeper again, which means that duplication has occurred.
+
+In the second scenario at the bottom, the producer publishes message 1, which is received by the broker and persisted, as in the first scenario. When the producer attempts to publish the message again, however, the broker knows that it has already seen message 1 and thus does not persist the message.
+
+> Message deduplication is handled at the namespace level or the topic level. For more instructions, see the [message deduplication cookbook](cookbooks-deduplication.md).
+
+
+### Producer idempotency
+
+The other available approach to message deduplication is to ensure that each message is *only produced once*. This approach is typically called **producer idempotency**. The drawback of this approach is that it defers the work of message deduplication to the application. In Pulsar, this is handled at the [broker](reference-terminology.md#broker) level, so you do not need to modify your Pulsar client code. Instead, you only need to make administrative changes. For details, see [Managing m [...]
+
+### Deduplication and effectively-once semantics
+
+Message deduplication makes Pulsar an ideal messaging system to be used in conjunction with stream processing engines (SPEs) and other systems seeking to provide effectively-once processing semantics. Messaging systems that do not offer automatic message deduplication require the SPE or other system to guarantee deduplication, which means that strict message ordering comes at the cost of burdening the application with the responsibility of deduplication. With Pulsar, strict ordering guar [...]
+
+> You can find more in-depth information in [this post](https://www.splunk.com/en_us/blog/it/exactly-once-is-not-exactly-the-same.html).
+
+## Delayed message delivery
+Delayed message delivery enables you to consume a message later rather than immediately. In this mechanism, a message is stored in BookKeeper, `DelayedDeliveryTracker` maintains the time index (time -> messageId) in memory after the message is published to a broker, and the message is delivered to a consumer once the specific delayed time is passed.  
+
+Delayed message delivery only works in Shared subscription mode. In Exclusive and Failover subscription modes, the delayed message is dispatched immediately.
+
+The diagram below illustrates the concept of delayed message delivery:
+
+![Delayed Message Delivery](/assets/message_delay.png)
+
+A broker saves a message without any check. When a consumer consumes a message, if the message is set to delay, then the message is added to `DelayedDeliveryTracker`. A subscription checks and gets timeout messages from `DelayedDeliveryTracker`.
+
+### Broker 
+Delayed message delivery is enabled by default. You can change it in the broker configuration file as below:
+
+```
+# Whether to enable the delayed delivery for messages.
+# If disabled, messages are immediately delivered and there is no tracking overhead.
+delayedDeliveryEnabled=true
+
+# Control the ticking time for the retry of delayed message delivery,
+# affecting the accuracy of the delivery time compared to the scheduled time.
+# Default is 1 second.
+delayedDeliveryTickTimeMillis=1000
+```
+
+### Producer 
+The following is an example of delayed message delivery for a producer in Java:
+```java
+// message to be delivered at the configured delay interval
+producer.newMessage().deliverAfter(3L, TimeUnit.MINUTES).value("Hello Pulsar!").send();
+```
diff --git a/site2/website-next/docs/concepts-multi-tenancy.md b/site2/website-next/docs/concepts-multi-tenancy.md
new file mode 100644
index 0000000..06d724c
--- /dev/null
+++ b/site2/website-next/docs/concepts-multi-tenancy.md
@@ -0,0 +1,54 @@
+---
+id: concepts-multi-tenancy
+title: Multi Tenancy
+sidebar_label: Multi Tenancy
+---
+
+Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview.md) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
+
+The multi-tenant nature of Pulsar is reflected most visibly in topic URLs, which have this structure:
+
+```http
+persistent://tenant/namespace/topic
+```
+
+As you can see, the tenant is the most basic unit of categorization for topics (more fundamental than the namespace and topic name).
+
+## Tenants
+
+To each tenant in a Pulsar instance you can assign:
+
+* An [authorization](security-authorization.md) scheme
+* The set of [clusters](reference-terminology.md#cluster) to which the tenant's configuration applies
+
+## Namespaces
+
+Tenants and namespaces are two key concepts of Pulsar to support multi-tenancy.
+
+* Pulsar is provisioned for specified tenants with appropriate capacity allocated to the tenant.
+* A namespace is the administrative unit nomenclature within a tenant. The configuration policies set on a namespace apply to all the topics created in that namespace. A tenant may create multiple namespaces via self-administration using the REST API and the [`pulsar-admin`](reference-pulsar-admin.md) CLI tool. For instance, a tenant with different applications can create a separate namespace for each application.
+
+Names for topics in the same namespace will look like this:
+
+```http
+persistent://tenant/app1/topic-1
+
+persistent://tenant/app1/topic-2
+
+persistent://tenant/app1/topic-3
+```
+
+### Namespace change events and topic-level policies
+
+Pulsar is a multi-tenant event streaming system. Administrators can manage the tenants and namespaces by setting policies at different levels. However, the policies, such as retention policy and storage quota policy, are only available at a namespace level. In many use cases, users need to set a policy at the topic level. The namespace change events approach is proposed for supporting topic-level policies in an efficient way. In this approach, Pulsar is used as an event log to store name [...]
+
+- Avoid using ZooKeeper directly, and thus avoid introducing more load to ZooKeeper.
+- Use Pulsar as an event log for propagating the policy cache. It can scale efficiently.
+- Use Pulsar SQL to query the namespace changes and audit the system.
+
+Each namespace has a system topic `__change_events`. This system topic is used for storing change events for a given namespace. The following figure illustrates how to use namespace change events to implement a topic-level policy.
+
+1. Pulsar Admin clients communicate with the Admin Restful API to update topic level policies.
+2. Any broker that receives the Admin HTTP request publishes a topic policy change event to the corresponding `__change_events` topic of the namespace.
+3. Each broker that owns a namespace bundle(s) subscribes to the `__change_events` topic to receive change events of the namespace. It then applies the change events to the policy cache.
+4. Once the policy cache is updated, the broker sends the response back to the Pulsar Admin clients.
diff --git a/site2/website-next/docs/concepts-multiple-advertised-listeners.md b/site2/website-next/docs/concepts-multiple-advertised-listeners.md
new file mode 100644
index 0000000..8ba0db9
--- /dev/null
+++ b/site2/website-next/docs/concepts-multiple-advertised-listeners.md
@@ -0,0 +1,38 @@
+---
+id: concepts-multiple-advertised-listeners
+title: Multiple advertised listeners
+sidebar_label: Multiple advertised listeners
+---
+
+When a Pulsar cluster is deployed in the production environment, it may require to expose multiple advertised addresses for the broker. For example, when you deploy a Pulsar cluster in Kubernetes and want other clients, which are not in the same Kubernetes cluster, to connect to the Pulsar cluster, you need to assign a broker URL to external clients. But clients in the same Kubernetes cluster can still connect to the Pulsar cluster through the internal network of Kubernetes.
+
+## Advertised listeners
+
+To ensure clients in both internal and external networks can connect to a Pulsar cluster, Pulsar introduces `advertisedListeners` and `internalListenerName` configuration options into the [broker configuration file](reference-configuration.md#broker) to ensure that the broker supports exposing multiple advertised listeners and support the separation of internal and external network traffic.
+
+- The `advertisedListeners` is used to specify multiple advertised listeners. The broker uses the listener as the broker identifier in the load manager and the bundle owner data. The `advertisedListeners` is formatted as `<listener_name>:pulsar://<host>:<port>, <listener_name>:pulsar+ssl://<host>:<port>`. You can set up the `advertisedListeners` like
+`advertisedListeners=internal:pulsar://192.168.1.11:6660,internal:pulsar+ssl://192.168.1.11:6651`.
+
+- The `internalListenerName` is used to specify the internal service URL that the broker uses. You can specify the `internalListenerName` by choosing one of the `advertisedListeners`. The broker uses the listener name of the first advertised listener as the `internalListenerName` if the `internalListenerName` is absent.
+
+After setting up the `advertisedListeners`, clients can choose one of the listeners as the service URL to create a connection to the broker as long as the network is accessible. However, if the client creates producers or consumers on a topic, the client must send a lookup request to the broker for getting the owner broker, then connect to the owner broker to publish messages or consume messages. Therefore, you must allow the client to get the corresponding service URL with the same adve [...]
+
+## Use multiple advertised listeners
+
+This example shows how a Pulsar client uses multiple advertised listeners.
+
+1. Configure multiple advertised listeners in the broker configuration file.
+
+```shell
+advertisedListeners={listenerName}:pulsar://xxxx:6650,
+{listenerName}:pulsar+ssl://xxxx:6651
+```
+
+2. Specify the listener name for the client.
+
+```java
+PulsarClient client = PulsarClient.builder()
+    .serviceUrl("pulsar://xxxx:6650")
+    .listenerName("external")
+    .build();
+```
\ No newline at end of file
diff --git a/site2/website-next/docs/concepts-overview.md b/site2/website-next/docs/concepts-overview.md
new file mode 100644
index 0000000..e30443e
--- /dev/null
+++ b/site2/website-next/docs/concepts-overview.md
@@ -0,0 +1,30 @@
+---
+id: concepts-overview
+title: Pulsar Overview
+sidebar_label: Overview
+---
+
+Pulsar is a multi-tenant, high-performance solution for server-to-server messaging. Originally developed by Yahoo, Pulsar is under the stewardship of the [Apache Software Foundation](https://www.apache.org/).
+
+Key features of Pulsar are listed below:
+
+* Native support for multiple clusters in a Pulsar instance, with seamless [geo-replication](administration-geo.md) of messages across clusters.
+* Very low publish and end-to-end latency.
+* Seamless scalability to over a million topics.
+* A simple [client API](concepts-clients.md) with bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md) and [C++](client-libraries-cpp.md).
+* Multiple [subscription modes](concepts-messaging.md#subscription-modes) ([exclusive](concepts-messaging.md#exclusive), [shared](concepts-messaging.md#shared), and [failover](concepts-messaging.md#failover)) for topics.
+* Guaranteed message delivery with [persistent message storage](concepts-architecture-overview.md#persistent-storage) provided by [Apache BookKeeper](http://bookkeeper.apache.org/).
+* A serverless light-weight computing framework [Pulsar Functions](functions-overview.md) offers the capability for stream-native data processing.
+* A serverless connector framework [Pulsar IO](io-overview.md), which is built on Pulsar Functions, makes it easier to move data in and out of Apache Pulsar.
+* [Tiered Storage](concepts-tiered-storage.md) offloads data from hot/warm storage to cold/longterm storage (such as S3 and GCS) when the data is aging out.
+
+## Contents
+
+- [Messaging Concepts](concepts-messaging.md)
+- [Architecture Overview](concepts-architecture-overview.md)
+- [Pulsar Clients](concepts-clients.md)
+- [Geo Replication](concepts-replication.md)
+- [Multi Tenancy](concepts-multi-tenancy.md)
+- [Authentication and Authorization](concepts-authentication.md)
+- [Topic Compaction](concepts-topic-compaction.md)
+- [Tiered Storage](concepts-tiered-storage.md)
diff --git a/site2/website-next/docs/concepts-proxy-sni-routing.md b/site2/website-next/docs/concepts-proxy-sni-routing.md
new file mode 100644
index 0000000..3574b54
--- /dev/null
+++ b/site2/website-next/docs/concepts-proxy-sni-routing.md
@@ -0,0 +1,150 @@
+---
+id: concepts-proxy-sni-routing
+title: Proxy support with SNI routing
+sidebar_label: Proxy support with SNI routing
+---
+
+A proxy server is an intermediary server that forwards requests from multiple clients to different servers across the Internet. The proxy server acts as a "traffic cop" in both forward and reverse proxy scenarios, and benefits your system such as load balancing, performance, security, auto-scaling, and so on.
+
+The proxy in Pulsar acts as a reverse proxy, and creates a gateway in front of brokers. Proxies such as Apache Traffic Server (ATS), HAProxy, Nginx, and Envoy are not supported in Pulsar. These proxy-servers support **SNI routing**. SNI routing is used to route traffic to a destination without terminating the SSL connection. Layer 4 routing provides greater transparency because the outbound connection is determined by examining the destination address in the client TCP packets.
+
+Pulsar clients (Java, C++, Python) support [SNI routing protocol](https://github.com/apache/pulsar/wiki/PIP-60:-Support-Proxy-server-with-SNI-routing), so you can connect to brokers through the proxy. This document walks you through how to set up the ATS proxy, enable SNI routing, and connect Pulsar client to the broker through the ATS proxy.
+
+## ATS-SNI Routing in Pulsar
+To support [layer-4 SNI routing](https://docs.trafficserver.apache.org/en/latest/admin-guide/layer-4-routing.en.html) with ATS, the inbound connection must be a TLS connection. Pulsar client supports SNI routing protocol on TLS connection, so when Pulsar clients connect to broker through ATS proxy, Pulsar uses ATS as a reverse proxy.
+
+Pulsar supports SNI routing for geo-replication, so brokers can connect to brokers in other clusters through the ATS proxy.
+
+This section explains how to set up and use ATS as a reverse proxy, so Pulsar clients can connect to brokers through the ATS proxy using the SNI routing protocol on TLS connection. 
+
+### Set up ATS Proxy for layer-4 SNI routing
+To support layer 4 SNI routing, you need to configure the `records.conf` and `ssl_server_name.conf` files.
+
+![Pulsar client SNI](/assets/pulsar-sni-client.png)
+
+The [records.config](https://docs.trafficserver.apache.org/en/latest/admin-guide/files/records.config.en.html) file is located in the `/usr/local/etc/trafficserver/` directory by default. The file lists configurable variables used by the ATS.
+
+To configure the `records.config` files, complete the following steps.
+1. Update TLS port (`http.server_ports`) on which proxy listens, and update proxy certs (`ssl.client.cert.path` and `ssl.client.cert.filename`) to secure TLS tunneling. 
+2. Configure server ports (`http.connect_ports`) used for tunneling to the broker. If Pulsar brokers are listening on `4443` and `6651` ports, add the brokers service port in the `http.connect_ports` configuration.
+
+The following is an example.
+
+```
+# PROXY TLS PORT
+CONFIG proxy.config.http.server_ports STRING 4443:ssl 4080
+# PROXY CERTS FILE PATH
+CONFIG proxy.config.ssl.client.cert.path STRING /proxy-cert.pem
+# PROXY KEY FILE PATH
+CONFIG proxy.config.ssl.client.cert.filename STRING /proxy-key.pem
+
+
+# The range of origin server ports that can be used for tunneling via CONNECT. # Traffic Server allows tunnels only to the specified ports. Supports both wildcards (*) and ranges (e.g. 0-1023).
+CONFIG proxy.config.http.connect_ports STRING 4443 6651
+```
+
+The [ssl_server_name](https://docs.trafficserver.apache.org/en/8.0.x/admin-guide/files/ssl_server_name.yaml.en.html) file is used to configure TLS connection handling for inbound and outbound connections. The configuration is determined by the SNI values provided by the inbound connection. The file consists of a set of configuration items, and each is identified by an SNI value (`fqdn`). When an inbound TLS connection is made, the SNI value from the TLS negotiation is matched with the it [...]
+
+The following example shows mapping of the inbound SNI hostname coming from the client, and the actual broker service URL where request should be redirected. For example, if the client sends the SNI header `pulsar-broker1`, the proxy creates a TLS tunnel by redirecting request to the `pulsar-broker1:6651` service URL.
+
+```
+server_config = {
+  {
+     fqdn = 'pulsar-broker-vip',
+     # Forward to Pulsar broker which is listening on 6651
+     tunnel_route = 'pulsar-broker-vip:6651'
+  },
+  {
+     fqdn = 'pulsar-broker1',
+     # Forward to Pulsar broker-1 which is listening on 6651
+     tunnel_route = 'pulsar-broker1:6651'
+  },
+  {
+     fqdn = 'pulsar-broker2',
+     # Forward to Pulsar broker-2 which is listening on 6651
+     tunnel_route = 'pulsar-broker2:6651'
+  },
+}
+```
+
+After you configure the `ssl_server_name.config` and `records.config` files, the ATS-proxy server handles SNI routing and creates TCP tunnel between the client and the broker.
+
+### Configure Pulsar-client with SNI routing
+ATS SNI-routing works only with TLS. You need to enable TLS for the ATS proxy and brokers first, configure the SNI routing protocol, and then connect Pulsar clients to brokers through ATS proxy. Pulsar clients support SNI routing by connecting to the proxy, and sending the target broker URL to the SNI header. This process is processed internally. You only need to configure the following proxy configuration initially when you create a Pulsar client to use the SNI routing protocol.
+
+<!--DOCUSAURUS_CODE_TABS-->
+
+<!--Java-->
+
+```java
+String brokerServiceUrl = "pulsar+ssl://pulsar-broker-vip:6651/";
+String proxyUrl = "pulsar+ssl://ats-proxy:443";
+ClientBuilder clientBuilder = PulsarClient.builder()
+		.serviceUrl(brokerServiceUrl)
+        .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH)
+        .enableTls(true)
+        .allowTlsInsecureConnection(false)
+        .proxyServiceUrl(proxyUrl, ProxyProtocol.SNI)
+        .operationTimeout(1000, TimeUnit.MILLISECONDS);
+
+Map<String, String> authParams = new HashMap<>();
+authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
+authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
+clientBuilder.authentication(AuthenticationTls.class.getName(), authParams);
+
+PulsarClient pulsarClient = clientBuilder.build();
+```
+
+<!--C++-->
+
+```c++
+ClientConfiguration config = ClientConfiguration();
+config.setUseTls(true);
+config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
+config.setTlsAllowInsecureConnection(false);
+config.setAuth(pulsar::AuthTls::create(
+            "/path/to/client-cert.pem", "/path/to/client-key.pem"));
+
+Client client("pulsar+ssl://ats-proxy:443", config);
+```
+
+<!--Python-->
+
+```python
+from pulsar import Client, AuthenticationTLS
+
+auth = AuthenticationTLS("/path/to/my-role.cert.pem", "/path/to/my-role.key-pk8.pem")
+client = Client("pulsar+ssl://ats-proxy:443",
+                tls_trust_certs_file_path="/path/to/ca.cert.pem",
+                tls_allow_insecure_connection=False,
+                authentication=auth)
+```
+
+<!--END_DOCUSAURUS_CODE_TABS-->
+
+### Pulsar geo-replication with SNI routing
+You can use the ATS proxy for geo-replication. Pulsar brokers can connect to brokers in geo-replication by using SNI routing. To enable SNI routing for broker connection cross clusters, you need to configure SNI proxy URL to the cluster metadata. If you have configured SNI proxy URL in the cluster metadata, you can connect to broker cross clusters through the proxy over SNI routing.
+
+![Pulsar client SNI](/assets/pulsar-sni-geo.png)
+
+In this example, a Pulsar cluster is deployed into two separate regions, `us-west` and `us-east`. Both regions are configured with ATS proxy, and brokers in each region run behind the ATS proxy. We configure the cluster metadata for both clusters, so brokers in one cluster can use SNI routing and connect to brokers in other clusters through the ATS proxy.
+
+(a) Configure the cluster metadata for `us-east` with `us-east` broker service URL and `us-east` ATS proxy URL with SNI proxy-protocol.
+
+```
+./pulsar-admin clusters update \
+--broker-url-secure pulsar+ssl://east-broker-vip:6651 \
+--url http://east-broker-vip:8080 \
+--proxy-protocol SNI \
+--proxy-url pulsar+ssl://east-ats-proxy:443
+```
+
+(b) Configure the cluster metadata for `us-west` with `us-west` broker service URL and `us-west` ATS proxy URL with SNI proxy-protocol.
+
+```
+./pulsar-admin clusters update \
+--broker-url-secure pulsar+ssl://west-broker-vip:6651 \
+--url http://west-broker-vip:8080 \
+--proxy-protocol SNI \
+--proxy-url pulsar+ssl://west-ats-proxy:443
+```
diff --git a/site2/website-next/docs/concepts-replication.md b/site2/website-next/docs/concepts-replication.md
new file mode 100644
index 0000000..3d1c823
--- /dev/null
+++ b/site2/website-next/docs/concepts-replication.md
@@ -0,0 +1,8 @@
+---
+id: concepts-replication
+title: Geo Replication
+sidebar_label: Geo Replication
+---
+
+Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo.md) in Pulsar enables you to do that.
+
diff --git a/site2/website-next/docs/concepts-schema-registry.md b/site2/website-next/docs/concepts-schema-registry.md
new file mode 100644
index 0000000..b405c42
--- /dev/null
+++ b/site2/website-next/docs/concepts-schema-registry.md
@@ -0,0 +1,5 @@
+---
+id: concepts-schema-registry
+title: Schema Registry
+sidebar_label: Schema Registry
+---
diff --git a/site2/website-next/docs/concepts-tiered-storage.md b/site2/website-next/docs/concepts-tiered-storage.md
new file mode 100644
index 0000000..1448287
--- /dev/null
+++ b/site2/website-next/docs/concepts-tiered-storage.md
@@ -0,0 +1,17 @@
+---
+id: concepts-tiered-storage
+title: Tiered Storage
+sidebar_label: Tiered Storage
+---
+
+Pulsar's segment-oriented architecture allows topic backlogs to grow very large, effectively without limit. However, this can become expensive over time.
+
+One way to alleviate this cost is to use Tiered Storage. With tiered storage, older messages in the backlog can be moved from BookKeeper to a cheaper storage mechanism, while still allowing clients to access the backlog as if nothing had changed.
+
+![Tiered Storage](/assets/pulsar-tiered-storage.png)
+
+> Data written to BookKeeper is replicated to 3 physical machines by default. However, once a segment is sealed in BookKeeper it becomes immutable and can be copied to long term storage. Long term storage can achieve cost savings by using mechanisms such as [Reed-Solomon error correction](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction) to require fewer physical copies of data.
+
+Pulsar currently supports S3, Google Cloud Storage (GCS), and filesystem for [long term store](https://pulsar.apache.org/docs/en/cookbooks-tiered-storage/). Offloading to long-term storage is triggered via a REST API or command-line interface. The user passes in the amount of topic data they wish to retain on BookKeeper, and the broker will copy the backlog data to long-term storage. The original data will then be deleted from BookKeeper after a configured delay (4 hours by default).
+
+> For a guide for setting up tiered storage, see the [Tiered storage cookbook](cookbooks-tiered-storage.md).
diff --git a/site2/website-next/docs/concepts-topic-compaction.md b/site2/website-next/docs/concepts-topic-compaction.md
new file mode 100644
index 0000000..96f0136
--- /dev/null
+++ b/site2/website-next/docs/concepts-topic-compaction.md
@@ -0,0 +1,36 @@
+---
+id: concepts-topic-compaction
+title: Topic Compaction
+sidebar_label: Topic Compaction
+---
+
+Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
+
+> For a more practical guide to topic compaction, see the [Topic compaction cookbook](cookbooks-compaction.md).
+
+For some use cases consumers don't need a complete "image" of the topic log. They may only need a few values to construct a more "shallow" image of the log, perhaps even just the most recent value. For these kinds of use cases Pulsar offers **topic compaction**. When you run compaction on a topic, Pulsar goes through a topic's backlog and removes messages that are *obscured* by later messages, i.e. it goes through the topic on a per-key basis and leaves only the most recent message assoc [...]
+
+Pulsar's topic compaction feature:
+
+* Allows for faster "rewind" through topic logs
+* Applies only to [persistent topics](concepts-architecture-overview.md#persistent-storage)
+* Triggered automatically when the backlog reaches a certain size or can be triggered manually via the command line. See the [Topic compaction cookbook](cookbooks-compaction.md)
+* Is conceptually and operationally distinct from [retention and expiry](concepts-messaging.md#message-retention-and-expiry). Topic compaction *does*, however, respect retention. If retention has removed a message from the message backlog of a topic, the message will also not be readable from the compacted topic ledger.
+
+> #### Topic compaction example: the stock ticker
+> An example use case for a compacted Pulsar topic would be a stock ticker topic. On a stock ticker topic, each message bears a timestamped dollar value for stocks for purchase (with the message key holding the stock symbol, e.g. `AAPL` or `GOOG`). With a stock ticker you may care only about the most recent value(s) of the stock and have no interest in historical data (i.e. you don't need to construct a complete image of the topic's sequence of messages per key). Compaction would be high [...]
+
+
+## How topic compaction works
+
+When topic compaction is triggered [via the CLI](cookbooks-compaction.md), Pulsar will iterate over the entire topic from beginning to end. For each key that it encounters the compaction routine will keep a record of the latest occurrence of that key.
+
+After that, the broker will create a new [BookKeeper ledger](concepts-architecture-overview.md#ledgers) and make a second iteration through each message on the topic. For each message, if the key matches the latest occurrence of that key, then the key's data payload, message ID, and metadata will be written to the newly created ledger. If the key doesn't match the latest then the message will be skipped and left alone. If any given message has an empty payload, it will be skipped and con [...]
+
+After the initial compaction operation, the Pulsar [broker](reference-terminology.md#broker) that owns the topic is notified whenever any future changes are made to the compaction horizon and compacted backlog. When such changes occur:
+
+* Clients (consumers and readers) that have compacted reads (`readCompacted`) enabled will attempt to read messages from a topic and either:
+  * Read from the topic like normal (if the message ID is greater than or equal to the compaction horizon) or
+  * Read beginning at the compaction horizon (if the message ID is lower than the compaction horizon)
+
+
diff --git a/site2/website-next/docs/concepts-transactions.md b/site2/website-next/docs/concepts-transactions.md
new file mode 100644
index 0000000..ab8818fa
--- /dev/null
+++ b/site2/website-next/docs/concepts-transactions.md
@@ -0,0 +1,29 @@
+---
+id: transactions
+title: Transactions
+sidebar_label: Overview
+---
+
+Transactional semantics enable event streaming applications to consume, process, and produce messages in one atomic operation. In Pulsar, a producer or consumer can work with messages across multiple topics and partitions and ensure those messages are processed as a single unit. 
+
+The following concepts help you understand Pulsar transactions.
+
+## Transaction coordinator and transaction log
+The transaction coordinator maintains the topics and subscriptions that interact in a transaction. When a transaction is committed, the transaction coordinator interacts with the topic owner broker to complete the transaction.
+
+The transaction coordinator maintains the entire life cycle of transactions, and prevents a transaction from entering an incorrect status.
+
+The transaction coordinator handles transaction timeout, and ensures that the transaction is aborted after a transaction timeout.
+
+All the transaction metadata is persisted in the transaction log. The transaction log is backed by a Pulsar topic. After the transaction coordinator crashes, it can restore the transaction metadata from the transaction log.
+
+## Transaction ID
+The transaction ID (TxnID) identifies a unique transaction in Pulsar. The transaction ID is 128-bit. The highest 16 bits are reserved for the ID of the transaction coordinator, and the remaining bits are used for monotonically increasing numbers in each transaction coordinator. The TxnID makes it easy to locate a failed transaction.
+
+## Transaction buffer
+Messages produced within a transaction are stored in the transaction buffer. The messages in transaction buffer are not materialized (visible) to consumers until the transactions are committed. The messages in the transaction buffer are discarded when the transactions are aborted. 
+
+## Pending acknowledge state
+Message acknowledgments within a transaction are maintained in the pending acknowledge state until the transaction completes. If a message is in the pending acknowledge state, the message cannot be acknowledged by other transactions until the message is removed from the pending acknowledge state.
+
+The pending acknowledge state is persisted to the pending acknowledge log. The pending acknowledge log is backed by a Pulsar topic. A new broker can restore the state from the pending acknowledge log to ensure the acknowledgement is not lost.
diff --git a/site2/website-next/docusaurus.config.js b/site2/website-next/docusaurus.config.js
index c95d779..84db594 100644
--- a/site2/website-next/docusaurus.config.js
+++ b/site2/website-next/docusaurus.config.js
@@ -98,4 +98,12 @@ module.exports = {
       },
     ],
   ],
+  plugins: [
+    [
+      "@docusaurus/plugin-client-redirects",
+      {
+        fromExtensions: ["md"],
+      },
+    ],
+  ],
 };
diff --git a/site2/website-next/package.json b/site2/website-next/package.json
index 8ae36b9..24b9f5b 100644
--- a/site2/website-next/package.json
+++ b/site2/website-next/package.json
@@ -15,6 +15,7 @@
   },
   "dependencies": {
     "@docusaurus/core": "2.0.0-beta.5",
+    "@docusaurus/plugin-client-redirects": "^2.0.0-beta.5",
     "@docusaurus/preset-classic": "2.0.0-beta.5",
     "@mdx-js/react": "^1.6.22",
     "@svgr/webpack": "^5.5.0",
diff --git a/site2/website-next/sidebars.js b/site2/website-next/sidebars.js
index 30eb093..07380b9 100644
--- a/site2/website-next/sidebars.js
+++ b/site2/website-next/sidebars.js
@@ -5,5 +5,21 @@ module.exports = {
       label: "Get Started",
       items: ["standalone", "standalone-docker", "kubernetes-helm"],
     },
+    {
+      type: "category",
+      label: "Concepts and Architecture",
+      items: [
+        "concepts-overview",
+        "concepts-messaging",
+        "concepts-architecture-overview",
+        "concepts-clients",
+        "concepts-replication",
+        "concepts-multi-tenancy",
+        "concepts-authentication",
+        "concepts-topic-compaction",
+        "concepts-proxy-sni-routing",
+        "concepts-multiple-advertised-listeners",
+      ],
+    },
   ],
 };
diff --git a/site2/website-next/static/assets/binary-protocol-connect.png b/site2/website-next/static/assets/binary-protocol-connect.png
new file mode 100644
index 0000000..e6b0d6a
Binary files /dev/null and b/site2/website-next/static/assets/binary-protocol-connect.png differ
diff --git a/site2/website-next/static/assets/binary-protocol-consumer.png b/site2/website-next/static/assets/binary-protocol-consumer.png
new file mode 100644
index 0000000..b526385
Binary files /dev/null and b/site2/website-next/static/assets/binary-protocol-consumer.png differ
diff --git a/site2/website-next/static/assets/binary-protocol-producer.png b/site2/website-next/static/assets/binary-protocol-producer.png
new file mode 100644
index 0000000..e971737
Binary files /dev/null and b/site2/website-next/static/assets/binary-protocol-producer.png differ
diff --git a/site2/website-next/static/assets/binary-protocol-topic-lookup.png b/site2/website-next/static/assets/binary-protocol-topic-lookup.png
new file mode 100644
index 0000000..2fd8551
Binary files /dev/null and b/site2/website-next/static/assets/binary-protocol-topic-lookup.png differ
diff --git a/site2/website-next/static/assets/broker-bookie.png b/site2/website-next/static/assets/broker-bookie.png
new file mode 100644
index 0000000..c866159
Binary files /dev/null and b/site2/website-next/static/assets/broker-bookie.png differ
diff --git a/site2/website-next/static/assets/chunking-01.png b/site2/website-next/static/assets/chunking-01.png
new file mode 100644
index 0000000..f83b747
Binary files /dev/null and b/site2/website-next/static/assets/chunking-01.png differ
diff --git a/site2/website-next/static/assets/chunking-02.png b/site2/website-next/static/assets/chunking-02.png
new file mode 100644
index 0000000..eb47a86
Binary files /dev/null and b/site2/website-next/static/assets/chunking-02.png differ
diff --git a/site2/website-next/static/assets/dcos_bookie_log.png b/site2/website-next/static/assets/dcos_bookie_log.png
new file mode 100644
index 0000000..31ac263
Binary files /dev/null and b/site2/website-next/static/assets/dcos_bookie_log.png differ
diff --git a/site2/website-next/static/assets/dcos_bookkeeper_in_zookeeper.png b/site2/website-next/static/assets/dcos_bookkeeper_in_zookeeper.png
new file mode 100644
index 0000000..80680bc
Binary files /dev/null and b/site2/website-next/static/assets/dcos_bookkeeper_in_zookeeper.png differ
diff --git a/site2/website-next/static/assets/dcos_bookkeeper_run.png b/site2/website-next/static/assets/dcos_bookkeeper_run.png
new file mode 100644
index 0000000..8cda68c
Binary files /dev/null and b/site2/website-next/static/assets/dcos_bookkeeper_run.png differ
diff --git a/site2/website-next/static/assets/dcos_bookkeeper_status.png b/site2/website-next/static/assets/dcos_bookkeeper_status.png
new file mode 100644
index 0000000..5e09a0c
Binary files /dev/null and b/site2/website-next/static/assets/dcos_bookkeeper_status.png differ
diff --git a/site2/website-next/static/assets/dcos_broker_in_zookeeper.png b/site2/website-next/static/assets/dcos_broker_in_zookeeper.png
new file mode 100644
index 0000000..3563e34
Binary files /dev/null and b/site2/website-next/static/assets/dcos_broker_in_zookeeper.png differ
diff --git a/site2/website-next/static/assets/dcos_broker_log.png b/site2/website-next/static/assets/dcos_broker_log.png
new file mode 100644
index 0000000..dfb78a7
Binary files /dev/null and b/site2/website-next/static/assets/dcos_broker_log.png differ
diff --git a/site2/website-next/static/assets/dcos_broker_run.png b/site2/website-next/static/assets/dcos_broker_run.png
new file mode 100644
index 0000000..9afeadb
Binary files /dev/null and b/site2/website-next/static/assets/dcos_broker_run.png differ
diff --git a/site2/website-next/static/assets/dcos_broker_status.png b/site2/website-next/static/assets/dcos_broker_status.png
new file mode 100644
index 0000000..d42f233
Binary files /dev/null and b/site2/website-next/static/assets/dcos_broker_status.png differ
diff --git a/site2/website-next/static/assets/dcos_command_execute.png b/site2/website-next/static/assets/dcos_command_execute.png
new file mode 100644
index 0000000..a5c4c42
Binary files /dev/null and b/site2/website-next/static/assets/dcos_command_execute.png differ
diff --git a/site2/website-next/static/assets/dcos_command_execute2.png b/site2/website-next/static/assets/dcos_command_execute2.png
new file mode 100644
index 0000000..5670cd0
Binary files /dev/null and b/site2/website-next/static/assets/dcos_command_execute2.png differ
diff --git a/site2/website-next/static/assets/dcos_consumer.png b/site2/website-next/static/assets/dcos_consumer.png
new file mode 100644
index 0000000..8b9b64a
Binary files /dev/null and b/site2/website-next/static/assets/dcos_consumer.png differ
diff --git a/site2/website-next/static/assets/dcos_grafana_dashboard.png b/site2/website-next/static/assets/dcos_grafana_dashboard.png
new file mode 100644
index 0000000..b937003
Binary files /dev/null and b/site2/website-next/static/assets/dcos_grafana_dashboard.png differ
diff --git a/site2/website-next/static/assets/dcos_grafana_endpoint.png b/site2/website-next/static/assets/dcos_grafana_endpoint.png
new file mode 100644
index 0000000..20e5894
Binary files /dev/null and b/site2/website-next/static/assets/dcos_grafana_endpoint.png differ
diff --git a/site2/website-next/static/assets/dcos_metrics.png b/site2/website-next/static/assets/dcos_metrics.png
new file mode 100644
index 0000000..7e06512
Binary files /dev/null and b/site2/website-next/static/assets/dcos_metrics.png differ
diff --git a/site2/website-next/static/assets/dcos_monitor_status.png b/site2/website-next/static/assets/dcos_monitor_status.png
new file mode 100644
index 0000000..bfc2089
Binary files /dev/null and b/site2/website-next/static/assets/dcos_monitor_status.png differ
diff --git a/site2/website-next/static/assets/dcos_producer.png b/site2/website-next/static/assets/dcos_producer.png
new file mode 100644
index 0000000..21a7cfc
Binary files /dev/null and b/site2/website-next/static/assets/dcos_producer.png differ
diff --git a/site2/website-next/static/assets/dcos_prom_endpoint.png b/site2/website-next/static/assets/dcos_prom_endpoint.png
new file mode 100644
index 0000000..36c9b8c
Binary files /dev/null and b/site2/website-next/static/assets/dcos_prom_endpoint.png differ
diff --git a/site2/website-next/static/assets/dcos_prom_targets.png b/site2/website-next/static/assets/dcos_prom_targets.png
new file mode 100644
index 0000000..0d362f3
Binary files /dev/null and b/site2/website-next/static/assets/dcos_prom_targets.png differ
diff --git a/site2/website-next/static/assets/dcos_uninstall.png b/site2/website-next/static/assets/dcos_uninstall.png
new file mode 100644
index 0000000..4ef4f56
Binary files /dev/null and b/site2/website-next/static/assets/dcos_uninstall.png differ
diff --git a/site2/website-next/static/assets/functions-worker-corun-proxy.png b/site2/website-next/static/assets/functions-worker-corun-proxy.png
new file mode 100644
index 0000000..3942968
Binary files /dev/null and b/site2/website-next/static/assets/functions-worker-corun-proxy.png differ
diff --git a/site2/website-next/static/assets/functions-worker-corun.png b/site2/website-next/static/assets/functions-worker-corun.png
new file mode 100644
index 0000000..224d114
Binary files /dev/null and b/site2/website-next/static/assets/functions-worker-corun.png differ
diff --git a/site2/website-next/static/assets/functions-worker-separated-proxy.png b/site2/website-next/static/assets/functions-worker-separated-proxy.png
new file mode 100644
index 0000000..b2f2714
Binary files /dev/null and b/site2/website-next/static/assets/functions-worker-separated-proxy.png differ
diff --git a/site2/website-next/static/assets/functions-worker-separated.png b/site2/website-next/static/assets/functions-worker-separated.png
new file mode 100644
index 0000000..b710c34
Binary files /dev/null and b/site2/website-next/static/assets/functions-worker-separated.png differ
diff --git a/site2/website-next/static/assets/geo-replication.png b/site2/website-next/static/assets/geo-replication.png
new file mode 100644
index 0000000..f913d54
Binary files /dev/null and b/site2/website-next/static/assets/geo-replication.png differ
diff --git a/site2/website-next/static/assets/message-deduplication.png b/site2/website-next/static/assets/message-deduplication.png
new file mode 100644
index 0000000..23e3e60
Binary files /dev/null and b/site2/website-next/static/assets/message-deduplication.png differ
diff --git a/site2/website-next/static/assets/message_delay.png b/site2/website-next/static/assets/message_delay.png
new file mode 100644
index 0000000..d9c3e02
Binary files /dev/null and b/site2/website-next/static/assets/message_delay.png differ
diff --git a/site2/website-next/static/assets/partitioning.png b/site2/website-next/static/assets/partitioning.png
new file mode 100644
index 0000000..b049452
Binary files /dev/null and b/site2/website-next/static/assets/partitioning.png differ
diff --git a/site2/website-next/static/assets/perf-produce.png b/site2/website-next/static/assets/perf-produce.png
new file mode 100644
index 0000000..b8a117f
Binary files /dev/null and b/site2/website-next/static/assets/perf-produce.png differ
diff --git a/site2/website-next/static/assets/pulsar-basic-setup.png b/site2/website-next/static/assets/pulsar-basic-setup.png
new file mode 100644
index 0000000..bb85eb9
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-basic-setup.png differ
diff --git a/site2/website-next/static/assets/pulsar-encryption-consumer.jpg b/site2/website-next/static/assets/pulsar-encryption-consumer.jpg
new file mode 100644
index 0000000..41f3f24
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-encryption-consumer.jpg differ
diff --git a/site2/website-next/static/assets/pulsar-encryption-producer.jpg b/site2/website-next/static/assets/pulsar-encryption-producer.jpg
new file mode 100644
index 0000000..1c4050e
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-encryption-producer.jpg differ
diff --git a/site2/website-next/static/assets/pulsar-exclusive-subscriptions.png b/site2/website-next/static/assets/pulsar-exclusive-subscriptions.png
new file mode 100644
index 0000000..3d5867b
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-exclusive-subscriptions.png differ
diff --git a/site2/website-next/static/assets/pulsar-failover-subscriptions.png b/site2/website-next/static/assets/pulsar-failover-subscriptions.png
new file mode 100644
index 0000000..2cf83fc
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-failover-subscriptions.png differ
diff --git a/site2/website-next/static/assets/pulsar-functions-overview.png b/site2/website-next/static/assets/pulsar-functions-overview.png
new file mode 100644
index 0000000..065046b
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-functions-overview.png differ
diff --git a/site2/website-next/static/assets/pulsar-functions-routing-example.png b/site2/website-next/static/assets/pulsar-functions-routing-example.png
new file mode 100644
index 0000000..27a1c44
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-functions-routing-example.png differ
diff --git a/site2/website-next/static/assets/pulsar-functions-word-count.png b/site2/website-next/static/assets/pulsar-functions-word-count.png
new file mode 100644
index 0000000..db1e497
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-functions-word-count.png differ
diff --git a/site2/website-next/static/assets/pulsar-io.png b/site2/website-next/static/assets/pulsar-io.png
new file mode 100644
index 0000000..3e74d4b
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-io.png differ
diff --git a/site2/website-next/static/assets/pulsar-key-shared-subscriptions.png b/site2/website-next/static/assets/pulsar-key-shared-subscriptions.png
new file mode 100644
index 0000000..db02e0f
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-key-shared-subscriptions.png differ
diff --git a/site2/website-next/static/assets/pulsar-reader-consumer-interfaces.png b/site2/website-next/static/assets/pulsar-reader-consumer-interfaces.png
new file mode 100644
index 0000000..26f05d3
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-reader-consumer-interfaces.png differ
diff --git a/site2/website-next/static/assets/pulsar-service-discovery.png b/site2/website-next/static/assets/pulsar-service-discovery.png
new file mode 100644
index 0000000..4dc3224
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-service-discovery.png differ
diff --git a/site2/website-next/static/assets/pulsar-shared-subscriptions.png b/site2/website-next/static/assets/pulsar-shared-subscriptions.png
new file mode 100644
index 0000000..13c0dae
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-shared-subscriptions.png differ
diff --git a/site2/website-next/static/assets/pulsar-sni-client.png b/site2/website-next/static/assets/pulsar-sni-client.png
new file mode 100644
index 0000000..c02e046
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-sni-client.png differ
diff --git a/site2/website-next/static/assets/pulsar-sni-geo.png b/site2/website-next/static/assets/pulsar-sni-geo.png
new file mode 100644
index 0000000..bff8482
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-sni-geo.png differ
diff --git a/site2/website-next/static/assets/pulsar-sql-arch-1.png b/site2/website-next/static/assets/pulsar-sql-arch-1.png
new file mode 100755
index 0000000..0441d34
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-sql-arch-1.png differ
diff --git a/site2/website-next/static/assets/pulsar-sql-arch-2.png b/site2/website-next/static/assets/pulsar-sql-arch-2.png
new file mode 100755
index 0000000..36a136d
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-sql-arch-2.png differ
diff --git a/site2/website-next/static/assets/pulsar-subscription-modes.png b/site2/website-next/static/assets/pulsar-subscription-modes.png
new file mode 100644
index 0000000..1412fd7
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-subscription-modes.png differ
diff --git a/site2/website-next/static/assets/pulsar-system-architecture.png b/site2/website-next/static/assets/pulsar-system-architecture.png
new file mode 100644
index 0000000..7e14381
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-system-architecture.png differ
diff --git a/site2/website-next/static/assets/pulsar-tiered-storage.png b/site2/website-next/static/assets/pulsar-tiered-storage.png
new file mode 100644
index 0000000..453bdf1
Binary files /dev/null and b/site2/website-next/static/assets/pulsar-tiered-storage.png differ
diff --git a/site2/website-next/static/assets/retention-expiry.png b/site2/website-next/static/assets/retention-expiry.png
new file mode 100644
index 0000000..5b9f5fd
Binary files /dev/null and b/site2/website-next/static/assets/retention-expiry.png differ
diff --git a/site2/website-next/static/assets/schema-autoupdate-consumer.png b/site2/website-next/static/assets/schema-autoupdate-consumer.png
new file mode 100644
index 0000000..3f6f6a7
Binary files /dev/null and b/site2/website-next/static/assets/schema-autoupdate-consumer.png differ
diff --git a/site2/website-next/static/assets/schema-autoupdate-producer.png b/site2/website-next/static/assets/schema-autoupdate-producer.png
new file mode 100644
index 0000000..b8d046c
Binary files /dev/null and b/site2/website-next/static/assets/schema-autoupdate-producer.png differ
diff --git a/site2/website-next/static/assets/schema-consumer.png b/site2/website-next/static/assets/schema-consumer.png
new file mode 100644
index 0000000..81b3d26
Binary files /dev/null and b/site2/website-next/static/assets/schema-consumer.png differ
diff --git a/site2/website-next/static/assets/schema-producer.png b/site2/website-next/static/assets/schema-producer.png
new file mode 100644
index 0000000..87ac64e
Binary files /dev/null and b/site2/website-next/static/assets/schema-producer.png differ
diff --git a/site2/website-next/static/assets/txn-1.png b/site2/website-next/static/assets/txn-1.png
new file mode 100644
index 0000000..357099b
Binary files /dev/null and b/site2/website-next/static/assets/txn-1.png differ
diff --git a/site2/website-next/static/assets/txn-2.png b/site2/website-next/static/assets/txn-2.png
new file mode 100644
index 0000000..2ab46e1
Binary files /dev/null and b/site2/website-next/static/assets/txn-2.png differ
diff --git a/site2/website-next/static/assets/txn-3.png b/site2/website-next/static/assets/txn-3.png
new file mode 100644
index 0000000..33236ee
Binary files /dev/null and b/site2/website-next/static/assets/txn-3.png differ
diff --git a/site2/website-next/static/assets/txn-4.png b/site2/website-next/static/assets/txn-4.png
new file mode 100644
index 0000000..fca20aa
Binary files /dev/null and b/site2/website-next/static/assets/txn-4.png differ
diff --git a/site2/website-next/static/assets/txn-5.png b/site2/website-next/static/assets/txn-5.png
new file mode 100644
index 0000000..0de425c
Binary files /dev/null and b/site2/website-next/static/assets/txn-5.png differ
diff --git a/site2/website-next/static/assets/txn-6.png b/site2/website-next/static/assets/txn-6.png
new file mode 100644
index 0000000..83a7151
Binary files /dev/null and b/site2/website-next/static/assets/txn-6.png differ
diff --git a/site2/website-next/static/assets/txn-7.png b/site2/website-next/static/assets/txn-7.png
new file mode 100644
index 0000000..0c71859
Binary files /dev/null and b/site2/website-next/static/assets/txn-7.png differ
diff --git a/site2/website-next/static/assets/txn-8.png b/site2/website-next/static/assets/txn-8.png
new file mode 100644
index 0000000..96654ba
Binary files /dev/null and b/site2/website-next/static/assets/txn-8.png differ
diff --git a/site2/website-next/static/assets/txn-9.png b/site2/website-next/static/assets/txn-9.png
new file mode 100644
index 0000000..1a24056
Binary files /dev/null and b/site2/website-next/static/assets/txn-9.png differ
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-architecture-overview.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-architecture-overview.md
new file mode 100644
index 0000000..57159c1
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-architecture-overview.md
@@ -0,0 +1,156 @@
+---
+id: concepts-architecture-overview
+title: Architecture Overview
+sidebar_label: Architecture
+original_id: concepts-architecture-overview
+---
+
+At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication.md) data amongst themselves.
+
+In a Pulsar cluster:
+
+* One or more brokers handles and load balances incoming messages from producers, dispatches messages to consumers, communicates with the Pulsar configuration store to handle various coordination tasks, stores messages in BookKeeper instances (aka bookies), relies on a cluster-specific ZooKeeper cluster for certain tasks, and more.
+* A BookKeeper cluster consisting of one or more bookies handles [persistent storage](#persistent-storage) of messages.
+* A ZooKeeper cluster specific to that cluster handles coordination tasks between Pulsar clusters.
+
+The diagram below provides an illustration of a Pulsar cluster:
+
+![Pulsar architecture diagram](/assets/pulsar-system-architecture.png)
+
+At the broader instance level, an instance-wide ZooKeeper cluster called the configuration store handles coordination tasks involving multiple clusters, for example [geo-replication](concepts-replication.md).
+
+## Brokers
+
+The Pulsar message broker is a stateless component that's primarily responsible for running two other components:
+
+* An HTTP server that exposes a {@inject: rest:REST:/} API for both administrative tasks and [topic lookup](concepts-clients.md#client-setup-phase) for producers and consumers
+* A dispatcher, which is an asynchronous TCP server over a custom [binary protocol](developing-binary-protocol.md) used for all data transfers
+
+Messages are typically dispatched out of a [managed ledger](#managed-ledgers) cache for the sake of performance, *unless* the backlog exceeds the cache size. If the backlog grows too large for the cache, the broker will start reading entries from BookKeeper.
+
+Finally, to support geo-replication on global topics, the broker manages replicators that tail the entries published in the local region and republish them to the remote region using the Pulsar [Java client library](client-libraries-java.md).
+
+> For a guide to managing Pulsar brokers, see the [brokers](admin-api-brokers.md) guide.
+
+## Clusters
+
+A Pulsar instance consists of one or more Pulsar *clusters*. Clusters, in turn, consist of:
+
+* One or more Pulsar [brokers](#brokers)
+* A ZooKeeper quorum used for cluster-level configuration and coordination
+* An ensemble of bookies used for [persistent storage](#persistent-storage) of messages
+
+Clusters can replicate amongst themselves using [geo-replication](concepts-replication.md).
+
+> For a guide to managing Pulsar clusters, see the [clusters](admin-api-clusters.md) guide.
+
+## Metadata store
+
+Pulsar uses [Apache Zookeeper](https://zookeeper.apache.org/) for metadata storage, cluster configuration, and coordination. In a Pulsar instance:
+
+* A configuration store quorum stores configuration for tenants, namespaces, and other entities that need to be globally consistent.
+* Each cluster has its own local ZooKeeper ensemble that stores cluster-specific configuration and coordination such as which brokers are responsible for which topics as well as ownership metadata, broker load reports, BookKeeper ledger metadata, and more.
+
+## Persistent storage
+
+Pulsar provides guaranteed message delivery for applications. If a message successfully reaches a Pulsar broker, it will be delivered to its intended target.
+
+This guarantee requires that non-acknowledged messages are stored in a durable manner until they can be delivered to and acknowledged by consumers. This mode of messaging is commonly called *persistent messaging*. In Pulsar, N copies of all messages are stored and synced on disk, for example 4 copies across two servers with mirrored [RAID](https://en.wikipedia.org/wiki/RAID) volumes on each server.
+
+### Apache BookKeeper
+
+Pulsar uses a system called [Apache BookKeeper](http://bookkeeper.apache.org/) for persistent message storage. BookKeeper is a distributed [write-ahead log](https://en.wikipedia.org/wiki/Write-ahead_logging) (WAL) system that provides a number of crucial advantages for Pulsar:
+
+* It enables Pulsar to utilize many independent logs, called [ledgers](#ledgers). Multiple ledgers can be created for topics over time.
+* It offers very efficient storage for sequential data that handles entry replication.
+* It guarantees read consistency of ledgers in the presence of various system failures.
+* It offers even distribution of I/O across bookies.
+* It's horizontally scalable in both capacity and throughput. Capacity can be immediately increased by adding more bookies to a cluster.
+* Bookies are designed to handle thousands of ledgers with concurrent reads and writes. By using multiple disk devices---one for journal and another for general storage---bookies are able to isolate the effects of read operations from the latency of ongoing write operations.
+
+In addition to message data, *cursors* are also persistently stored in BookKeeper. Cursors are [subscription](reference-terminology.md#subscription) positions for [consumers](reference-terminology.md#consumer). BookKeeper enables Pulsar to store consumer position in a scalable fashion.
+
+At the moment, Pulsar supports persistent message storage. This accounts for the `persistent` in all topic names. Here's an example:
+
+```http
+persistent://my-tenant/my-namespace/my-topic
+```
+
+> Pulsar also supports ephemeral ([non-persistent](concepts-messaging.md#non-persistent-topics)) message storage.
+
+
+You can see an illustration of how brokers and bookies interact in the diagram below:
+
+![Brokers and bookies](/assets/broker-bookie.png)
+
+
+### Ledgers
+
+A ledger is an append-only data structure with a single writer that is assigned to multiple BookKeeper storage nodes, or bookies. Ledger entries are replicated to multiple bookies. Ledgers themselves have very simple semantics:
+
+* A Pulsar broker can create a ledger, append entries to the ledger, and close the ledger.
+* After the ledger has been closed---either explicitly or because the writer process crashed---it can then be opened only in read-only mode.
+* Finally, when entries in the ledger are no longer needed, the whole ledger can be deleted from the system (across all bookies).
+
+#### Ledger read consistency
+
+The main strength of Bookkeeper is that it guarantees read consistency in ledgers in the presence of failures. Since the ledger can only be written to by a single process, that process is free to append entries very efficiently, without need to obtain consensus. After a failure, the ledger will go through a recovery process that will finalize the state of the ledger and establish which entry was last committed to the log. After that point, all readers of the ledger are guaranteed to see  [...]
+
+#### Managed ledgers
+
+Given that Bookkeeper ledgers provide a single log abstraction, a library was developed on top of the ledger called the *managed ledger* that represents the storage layer for a single topic. A managed ledger represents the abstraction of a stream of messages with a single writer that keeps appending at the end of the stream and multiple cursors that are consuming the stream, each with its own associated position.
+
+Internally, a single managed ledger uses multiple BookKeeper ledgers to store the data. There are two reasons to have multiple ledgers:
+
+1. After a failure, a ledger is no longer writable and a new one needs to be created.
+2. A ledger can be deleted when all cursors have consumed the messages it contains. This allows for periodic rollover of ledgers.
+
+### Journal storage
+
+In BookKeeper, *journal* files contain BookKeeper transaction logs. Before making an update to a [ledger](#ledgers), a bookie needs to ensure that a transaction describing the update is written to persistent (non-volatile) storage. A new journal file is created once the bookie starts or the older journal file reaches the journal file size threshold (configured using the [`journalMaxSizeMB`](reference-configuration.md#bookkeeper-journalMaxSizeMB) parameter).
+
+## Pulsar proxy
+
+One way for Pulsar clients to interact with a Pulsar [cluster](#clusters) is by connecting to Pulsar message [brokers](#brokers) directly. In some cases, however, this kind of direct connection is either infeasible or undesirable because the client doesn't have direct access to broker addresses. If you're running Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, for example, then direct client connections to brokers are likely not possible.
+
+The **Pulsar proxy** provides a solution to this problem by acting as a single gateway for all of the brokers in a cluster. If you run the Pulsar proxy (which, again, is optional), all client connections with the Pulsar cluster will flow through the proxy rather than communicating with brokers.
+
+> For the sake of performance and fault tolerance, you can run as many instances of the Pulsar proxy as you'd like.
+
+Architecturally, the Pulsar proxy gets all the information it requires from ZooKeeper. When starting the proxy on a machine, you only need to provide ZooKeeper connection strings for the cluster-specific and instance-wide configuration store clusters. Here's an example:
+
+```bash
+$ bin/pulsar proxy \
+  --zookeeper-servers zk-0,zk-1,zk-2 \
+  --configuration-store-servers zk-0,zk-1,zk-2
+```
+
+> #### Pulsar proxy docs
+> For documentation on using the Pulsar proxy, see the [Pulsar proxy admin documentation](administration-proxy.md).
+
+
+Some important things to know about the Pulsar proxy:
+
+* Connecting clients don't need to provide *any* specific configuration to use the Pulsar proxy. You won't need to update the client configuration for existing applications beyond updating the IP used for the service URL (for example if you're running a load balancer over the Pulsar proxy).
+* [TLS encryption](security-tls-transport.md) and [authentication](security-tls-authentication.md) is supported by the Pulsar proxy
+
+## Service discovery
+
+[Clients](getting-started-clients.md) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+
+You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
+
+The diagram below illustrates Pulsar service discovery:
+
+![alt-text](/assets/pulsar-service-discovery.png)
+
+In this diagram, the Pulsar cluster is addressable via a single DNS name: `pulsar-cluster.acme.com`. A [Python client](client-libraries-python.md), for example, could access this Pulsar cluster like this:
+
+```python
+from pulsar import Client
+
+client = Client('pulsar://pulsar-cluster.acme.com:6650')
+```
+
+> **Note**
+> In Pulsar, each topic is handled by only one broker. Initial requests from a client to read, update or delete a topic are sent to a broker that may not be the topic owner. If the broker cannot handle the request for this topic, it redirects the request to the appropriate broker.
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-authentication.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-authentication.md
new file mode 100644
index 0000000..781d068
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-authentication.md
@@ -0,0 +1,9 @@
+---
+id: concepts-authentication
+title: Authentication and Authorization
+sidebar_label: Authentication and Authorization
+original_id: concepts-authentication
+---
+
+Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization.md) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
+
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-clients.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-clients.md
new file mode 100644
index 0000000..1ce2865
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-clients.md
@@ -0,0 +1,85 @@
+---
+id: concepts-clients
+title: Pulsar Clients
+sidebar_label: Clients
+original_id: concepts-clients
+---
+
+Pulsar exposes a client API with language bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md), [C++](client-libraries-cpp.md) and [C#](client-libraries-dotnet.md). The client API optimizes and encapsulates Pulsar's client-broker communication protocol and exposes a simple and intuitive API for use by applications.
+
+Under the hood, the current official Pulsar client libraries support transparent reconnection and/or connection failover to brokers, queuing of messages until acknowledged by the broker, and heuristics such as connection retries with backoff.
+
+> **Custom client libraries**
+> If you'd like to create your own client library, we recommend consulting the documentation on Pulsar's custom [binary protocol](developing-binary-protocol.md).
+
+
+## Client setup phase
+
+Before an application creates a producer/consumer, the Pulsar client library needs to initiate a setup phase including two steps:
+
+1. The client attempts to determine the owner of the topic by sending an HTTP lookup request to the broker. The request could reach one of the active brokers which, by looking at the (cached) zookeeper metadata knows who is serving the topic or, in case nobody is serving it, tries to assign it to the least loaded broker.
+1. Once the client library has the broker address, it creates a TCP connection (or reuses an existing connection from the pool) and authenticates it. Within this connection, client and broker exchange binary commands from a custom protocol. At this point the client sends a command to create producer/consumer to the broker, which will comply after having validated the authorization policy.
+
+Whenever the TCP connection breaks, the client immediately re-initiates this setup phase and keeps trying with exponential backoff to re-establish the producer or consumer until the operation succeeds.
+
+## Reader interface
+
+In Pulsar, the "standard" [consumer interface](concepts-messaging.md#consumers) involves using consumers to listen on [topics](reference-terminology.md#topic), process incoming messages, and finally acknowledge those messages when they are processed. Whenever a new subscription is created, it is initially positioned at the end of the topic (by default), and consumers associated with that subscription begin reading with the first message created afterwards.  Whenever a consumer connects t [...]
+
+The **reader interface** for Pulsar enables applications to manually manage cursors. When you use a reader to connect to a topic---rather than a consumer---you need to specify *which* message the reader begins reading from when it connects to a topic. When connecting to a topic, the reader interface enables you to begin with:
+
+* The **earliest** available message in the topic
+* The **latest** available message in the topic
+* Some other message between the earliest and the latest. If you select this option, you'll need to explicitly provide a message ID. Your application will be responsible for "knowing" this message ID in advance, perhaps fetching it from a persistent data store or cache.
+
+The reader interface is helpful for use cases like using Pulsar to provide effectively-once processing semantics for a stream processing system. For this use case, it's essential that the stream processing system be able to "rewind" topics to a specific message and begin reading there. The reader interface provides Pulsar clients with the low-level abstraction necessary to "manually position" themselves within a topic.
+
+Internally, the reader interface is implemented as a consumer using an exclusive, non-durable subscription to the topic with a randomly-allocated name.
+
+[ **IMPORTANT** ]
+
+Unlike subscription/consumer, readers are non-durable in nature and does not prevent data in a topic from being deleted, thus it is ***strongly*** advised that [data retention](cookbooks-retention-expiry.md) be configured. If data retention for a topic is not configured for an adequate amount of time, messages that the reader has not yet read might be deleted .  This causes the readers to essentially skip messages. Configuring the data retention for a topic guarantees the reader with a c [...]
+
+Please also note that a reader can have a "backlog", but the metric is only used for users to know how behind the reader is. The metric is not considered for any backlog quota calculations. 
+
+![The Pulsar consumer and reader interfaces](/assets/pulsar-reader-consumer-interfaces.png)
+
+Here's a Java example that begins reading from the earliest available message on a topic:
+
+```java
+import org.apache.pulsar.client.api.Message;
+import org.apache.pulsar.client.api.MessageId;
+import org.apache.pulsar.client.api.Reader;
+
+// Create a reader on a topic and for a specific message (and onward)
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic("reader-api-test")
+    .startMessageId(MessageId.earliest)
+    .create();
+
+while (true) {
+    Message message = reader.readNext();
+
+    // Process the message
+}
+```
+
+To create a reader that reads from the latest available message:
+
+```java
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic(topic)
+    .startMessageId(MessageId.latest)
+    .create();
+```
+
+To create a reader that reads from some message between the earliest and the latest:
+
+```java
+byte[] msgIdBytes = // Some byte array
+MessageId id = MessageId.fromByteArray(msgIdBytes);
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic(topic)
+    .startMessageId(id)
+    .create();
+```
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-messaging.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-messaging.md
new file mode 100644
index 0000000..02c7e1e
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-messaging.md
@@ -0,0 +1,521 @@
+---
+id: concepts-messaging
+title: Messaging
+sidebar_label: Messaging
+original_id: concepts-messaging
+---
+
+Pulsar is built on the [publish-subscribe](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) pattern (often abbreviated to pub-sub). In this pattern, [producers](#producers) publish messages to [topics](#topics). [Consumers](#consumers) [subscribe](#subscription-modes) to those topics, process incoming messages, and send an acknowledgement when processing is complete.
+
+When a subscription is created, Pulsar [retains](concepts-architecture-overview.md#persistent-storage) all messages, even if the consumer is disconnected. Retained messages are discarded only when a consumer acknowledges that those messages are processed successfully.
+
+## Messages
+
+Messages are the basic "unit" of Pulsar. The following table lists the components of messages.
+
+Component | Description
+:---------|:-------
+Value / data payload | The data carried by the message. All Pulsar messages contain raw bytes, although message data can also conform to data [schemas](schema-get-started.md).
+Key | Messages are optionally tagged with keys, which is useful for things like [topic compaction](concepts-topic-compaction.md).
+Properties | An optional key/value map of user-defined properties.
+Producer name | The name of the producer who produces the message. If you do not specify a producer name, the default name is used. 
+Sequence ID | Each Pulsar message belongs to an ordered sequence on its topic. The sequence ID of the message is its order in that sequence.
+Publish time | The timestamp of when the message is published. The timestamp is automatically applied by the producer.
+Event time | An optional timestamp attached to a message by applications. For example, applications attach a timestamp on when the message is processed. If nothing is set to event time, the value is `0`. 
+TypedMessageBuilder | It is used to construct a message. You can set message properties such as the message key, message value with `TypedMessageBuilder`. <br /> When you set `TypedMessageBuilder`, set the key as a string. If you set the key as other types, for example, an AVRO object, the key is sent as bytes, and it is difficult to get the AVRO object back on the consumer.
+
+> For more information on Pulsar message contents, see Pulsar [binary protocol](developing-binary-protocol.md).
+
+## Producers
+
+A producer is a process that attaches to a topic and publishes messages to a Pulsar [broker](reference-terminology.md#broker). The Pulsar broker processes the messages.
+
+### Send modes
+
+Producers send messages to brokers synchronously (sync) or asynchronously (async).
+
+| Mode       | Description |            
+|:-----------|-----------|
+| Sync send  | The producer waits for an acknowledgement from the broker after sending every message. If the acknowledgment is not received, the producer treats the sending operation as a failure.                                                                                                                                                                                    |
+| Async send | The producer puts a message in a blocking queue and returns immediately. The client library sends the message to the broker in the background. If the queue is full (you can [configure](reference-configuration.md#broker) the maximum size), the producer is blocked or fails immediately when calling the API, depending on arguments passed to the producer. |
+
+### Compression
+
+You can compress messages published by producers during transportation. Pulsar currently supports the following types of compression:
+
+* [LZ4](https://github.com/lz4/lz4)
+* [ZLIB](https://zlib.net/)
+* [ZSTD](https://facebook.github.io/zstd/)
+* [SNAPPY](https://google.github.io/snappy/)
+
+### Batching
+
+When batching is enabled, the producer accumulates and sends a batch of messages in a single request. The batch size is defined by the maximum number of messages and the maximum publish latency. Therefore, the backlog size represents the total number of batches instead of the total number of messages.
+
+In Pulsar, batches are tracked and stored as single units rather than as individual messages. Consumer unbundles a batch into individual messages. However, scheduled messages (configured through the `deliverAt` or the `deliverAfter` parameter) are always sent as individual messages even if batching is enabled.
+
+In general, a batch is acknowledged when all of its messages are acknowledged by a consumer. It means unexpected failures, negative acknowledgements, or acknowledgement timeouts can result in redelivery of all messages in a batch, even if some of the messages are acknowledged.
+
+To avoid redelivering acknowledged messages in a batch to the consumer, Pulsar introduces batch index acknowledgement since Pulsar 2.6.0. When batch index acknowledgement is enabled, the consumer filters out the batch index that has been acknowledged and sends the batch index acknowledgement request to the broker. The broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer [...]
+
+By default, batch index acknowledgement is disabled (`acknowledgmentAtBatchIndexLevelEnabled=false`). You can enable batch index acknowledgement by setting the `acknowledgmentAtBatchIndexLevelEnabled` parameter to `true` at the broker side. Enabling batch index acknowledgement results in more memory overheads. 
+
+### Chunking
+When you enable chunking, read the following instructions.
+- Batching and chunking cannot be enabled simultaneously. To enable chunking, you must disable batching in advance.
+- Chunking is only supported for persisted topics.
+- Chunking is only supported for the exclusive and failover subscription modes.
+
+When chunking is enabled (`chunkingEnabled=true`), if the message size is greater than the allowed maximum publish-payload size, the producer splits the original message into chunked messages and publishes them with chunked metadata to the broker separately and in order. At the broker side, the chunked messages are stored in the managed-ledger in the same way as that of ordinary messages. The only difference is that the consumer needs to buffer the chunked messages and combines them into [...]
+
+The consumer consumes the chunked messages and buffers them until the consumer receives all the chunks of a message. And then the consumer stitches chunked messages together and places them into the receiver-queue. Clients consume messages from the receiver-queue. Once the consumer consumes the entire large message and acknowledges it, the consumer internally sends acknowledgement of all the chunk messages associated to that large message. You can set the `maxPendingChuckedMessage` param [...]
+
+ The broker does not require any changes to support chunking for non-shared subscription. The broker only uses `chuckedMessageRate` to record chunked message rate on the topic.
+
+#### Handle chunked messages with one producer and one ordered consumer
+
+As shown in the following figure, when a topic has one producer which publishes large message payload in chunked messages along with regular non-chunked messages. The producer publishes message M1 in three chunks M1-C1, M1-C2 and M1-C3. The broker stores all the three chunked messages in the managed-ledger and dispatches to the ordered (exclusive/failover) consumer in the same order. The consumer buffers all the chunked messages in memory until it receives all the chunked messages, combi [...]
+
+![](/assets/chunking-01.png)
+
+#### Handle chunked messages with multiple producers and one ordered consumer
+
+When multiple publishers publish chunked messages into a single topic, the broker stores all the chunked messages coming from different publishers in the same managed-ledger. As shown below, Producer 1 publishes message M1 in three chunks M1-C1, M1-C2 and M1-C3. Producer 2 publishes message M2 in three chunks M2-C1, M2-C2 and M2-C3. All chunked messages of the specific message are still in order but might not be consecutive in the managed-ledger. This brings some memory pressure to the c [...]
+
+![](/assets/chunking-02.png)
+
+## Consumers
+
+A consumer is a process that attaches to a topic via a subscription and then receives messages.
+
+A consumer sends a [flow permit request](developing-binary-protocol.md#flow-control) to a broker to get messages. There is a queue at the consumer side to receive messages pushed from the broker. You can configure the queue size with the [`receiverQueueSize`](client-libraries-java.md#configure-consumer) parameter. The default size is `1000`. Each time `consumer.receive()` is called, a message is dequeued from the buffer.  
+
+### Receive modes
+
+Messages are received from [brokers](reference-terminology.md#broker) either synchronously (sync) or asynchronously (async).
+
+| Mode          | Description                                                                                                                                                                                                   |
+|:--------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Sync receive  | A sync receive is blocked until a message is available.                                                                                                                                                  |
+| Async receive | An async receive returns immediately with a future value—for example, a [`CompletableFuture`](http://www.baeldung.com/java-completablefuture) in Java—that completes once a new message is available. |
+
+### Listeners
+
+Client libraries provide listener implementation for consumers. For example, the [Java client](client-libraries-java.md) provides a {@inject: javadoc:MessageListener:/client/org/apache/pulsar/client/api/MessageListener} interface. In this interface, the `received` method is called whenever a new message is received.
+
+### Acknowledgement
+
+When a consumer consumes a message successfully, the consumer sends an acknowledgement request to the broker. This message is permanently stored, and then deleted only after all the subscriptions have acknowledged it. If you want to store the message that has been acknowledged by a consumer, you need to configure the [message retention policy](concepts-messaging.md#message-retention-and-expiry).
+
+For a batch message, if batch index acknowledgement is enabled, the broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer. When all indexes of the batch message are acknowledged, the batch message is deleted. For details about the batch index acknowledgement, see [batching](#batching).
+
+Messages can be acknowledged in the following two ways:
+
+- Messages are acknowledged individually. With individual acknowledgement, the consumer needs to acknowledge each message and sends an acknowledgement request to the broker.
+- Messages are acknowledged cumulatively. With cumulative acknowledgement, the consumer only needs to acknowledge the last message it received. All messages in the stream up to (and including) the provided message are not re-delivered to that consumer.
+
+> **Note**
+> Cumulative acknowledgement cannot be used in the [shared subscription mode](#subscription-modes), because the shared subscription mode involves multiple consumers who have access to the same subscription. In the shared subscription mode, messages are acknowledged individually.
+
+### Negative acknowledgement
+
+When a consumer fails to consume a message successfully and wants to consume it again, the consumer sends a negative acknowledgement to the broker, and then the broker redelivers the message.
+
+Messages are negatively acknowledged one by one or cumulatively, which depends on the consumption subscription mode.
+
+In the exclusive and failover subscription modes, consumers only negatively acknowledge the last message they receive.
+
+In the shared and Key_Shared subscription modes, you can negatively acknowledge messages individually.
+
+Be aware that negative acknowledgment on ordered subscription types, such as Exclusive, Failover and Key_Shared, can cause failed messages to arrive at consumers out of the original order.
+
+> **Note**
+> If batching is enabled, other messages and the negatively acknowledged messages in the same batch are redelivered to the consumer.
+
+### Acknowledgement timeout
+
+If a message is not consumed successfully, and you want to trigger the broker to redeliver the message automatically, you can adopt the unacknowledged message automatic re-delivery mechanism. The client tracks the unacknowledged messages within the entire `ackTimeout` time range, and sends a `redeliver unacknowledged messages` request to the broker automatically when the acknowledgement timeout is specified.
+
+> **Note**
+> If batching is enabled, other messages and the unacknowledged messages in the same batch are redelivered to the consumer.
+
+> **Note**    
+> Prefer negative acknowledgements over acknowledgement timeout. Negative acknowledgement controls the re-delivery of individual messages with more precision, and avoids invalid redeliveries when the message processing time exceeds the acknowledgement timeout.
+
+### Dead letter topic
+
+Dead letter topic enables you to consume new messages when some messages cannot be consumed successfully by a consumer. In this mechanism, messages that are failed to be consumed are stored in a separate topic, which is called dead letter topic. You can decide how to handle messages in the dead letter topic.
+
+The following example shows how to enable dead letter topic in a Java client using the default dead letter topic:
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+              .topic(topic)
+              .subscriptionName("my-subscription")
+              .subscriptionType(SubscriptionType.Shared)
+              .deadLetterPolicy(DeadLetterPolicy.builder()
+                    .maxRedeliverCount(maxRedeliveryCount)
+                    .build())
+              .subscribe();
+                
+```
+The default dead letter topic uses this format: 
+```
+<topicname>-<subscriptionname>-DLQ
+```
+  
+If you want to specify the name of the dead letter topic, use this Java client example:
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+              .topic(topic)
+              .subscriptionName("my-subscription")
+              .subscriptionType(SubscriptionType.Shared)
+              .deadLetterPolicy(DeadLetterPolicy.builder()
+                    .maxRedeliverCount(maxRedeliveryCount)
+                    .deadLetterTopic("your-topic-name")
+                    .build())
+              .subscribe();
+                
+```
+  
+Dead letter topic depends on message re-delivery. Messages are redelivered either due to [acknowledgement timeout](#acknowledgement-timeout) or [negative acknowledgement](#negative-acknowledgement). If you are going to use negative acknowledgement on a message, make sure it is negatively acknowledged before the acknowledgement timeout. 
+
+> **Note**    
+> Currently, dead letter topic is enabled only in the shared subscription mode.
+
+### Retry letter topic
+
+For many online business systems, a message is re-consumed due to exception occurs in the business logic processing. To configure the delay time for re-consuming the failed messages, you can configure the producer to send messages to both the business topic and the retry letter topic, and enable automatic retry on the consumer. When automatic retry is enabled on the consumer, a message is stored in the retry letter topic if the messages are not consumed, and therefore the consumer automa [...]
+
+By default, automatic retry is disabled. You can set `enableRetry` to `true` to enable automatic retry on the consumer.
+
+This example shows how to consume messages from a retry letter topic.
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+                .topic(topic)
+                .subscriptionName("my-subscription")
+                .subscriptionType(SubscriptionType.Shared)
+                .enableRetry(true)
+                .receiverQueueSize(100)
+                .deadLetterPolicy(DeadLetterPolicy.builder()
+                        .maxRedeliverCount(maxRedeliveryCount)
+                        .retryLetterTopic("persistent://my-property/my-ns/my-subscription-custom-Retry")
+                        .build())
+                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
+                .subscribe();
+```
+
+## Topics
+
+As in other pub-sub systems, topics in Pulsar are named channels for transmitting messages from producers to consumers. Topic names are URLs that have a well-defined structure:
+
+```http
+{persistent|non-persistent}://tenant/namespace/topic
+```
+
+Topic name component | Description
+:--------------------|:-----------
+`persistent` / `non-persistent` | This identifies the type of topic. Pulsar supports two kind of topics: [persistent](concepts-architecture-overview.md#persistent-storage) and [non-persistent](#non-persistent-topics). The default is persistent, so if you do not specify a type, the topic is persistent. With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topi [...]
+`tenant`             | The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+`namespace`          | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespaces) level. Each tenant has one or multiple namespaces.
+`topic`              | The final part of the name. Topic names have no special meaning in a Pulsar instance.
+
+> **No need to explicitly create new topics**
+> You do not need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar creates that topic under the namespace provided in the [topic name](#topics) automatically.
+> If no tenant or namespace is specified when a client creates a topic, the topic is created in the default tenant and namespace. You can also create a topic in a specified tenant and namespace, such as `persistent://my-tenant/my-namespace/my-topic`. `persistent://my-tenant/my-namespace/my-topic` means the `my-topic` topic is created in the `my-namespace` namespace of the `my-tenant` tenant.
+
+## Namespaces
+
+A namespace is a logical nomenclature within a tenant. A tenant creates multiple namespaces via the [admin API](admin-api-namespaces.md#create). For instance, a tenant with different applications can create a separate namespace for each application. A namespace allows the application to create and manage a hierarchy of topics. The topic `my-tenant/app1` is a namespace for the application `app1` for `my-tenant`. You can create any number of [topics](#topics) under the namespace.
+
+## Subscriptions
+
+A subscription is a named configuration rule that determines how messages are delivered to consumers. Four subscription modes are available in Pulsar: [exclusive](#exclusive), [shared](#shared), [failover](#failover), and [key_shared](#key_shared). These modes are illustrated in the figure below.
+
+![Subscription modes](/assets/pulsar-subscription-modes.png)
+
+> **Pub-Sub or Queuing**
+> In Pulsar, you can use different subscriptions flexibly.
+> * If you want to achieve traditional "fan-out pub-sub messaging" among consumers, specify a unique subscription name for each consumer. It is exclusive subscription mode.
+> * If you want to achieve "message queuing" among consumers, share the same subscription name among multiple consumers(shared, failover, key_shared).
+> * If you want to achieve both effects simultaneously, combine exclusive subscription mode with other subscription modes for consumers.
+
+### Consumerless Subscriptions and Their Corresponding Modes
+When a subscription has no consumers, its subscription mode is undefined. A subscription's mode is defined when a consumer connects to the subscription, and the mode can be changed by restarting all consumers with a different configuration.
+
+### Exclusive
+
+In *exclusive* mode, only a single consumer is allowed to attach to the subscription. If multiple consumers subscribe to a topic using the same subscription, an error occurs.
+
+In the diagram below, only **Consumer A-0** is allowed to consume messages.
+
+> Exclusive mode is the default subscription mode.
+
+![Exclusive subscriptions](/assets/pulsar-exclusive-subscriptions.png)
+
+### Failover
+
+In *failover* mode, multiple consumers can attach to the same subscription. A master consumer is picked for non-partitioned topic or each partition of partitioned topic and receives messages. When the master consumer disconnects, all (non-acknowledged and subsequent) messages are delivered to the next consumer in line.
+
+For partitioned topics, the broker sorts consumers by priority level and by lexicographical order of consumer name. The broker then tries to evenly assign topics to the consumers with the highest priority level.
+
+For non-partitioned topics, the broker picks consumers in the order in which they subscribe to the non-partitioned topic.
+
+In the diagram below, **Consumer-B-0** is the master consumer while **Consumer-B-1** would be the next consumer in line to receive messages if **Consumer-B-0** is disconnected.
+
+![Failover subscriptions](/assets/pulsar-failover-subscriptions.png)
+
+### Shared
+
+In *shared* or *round robin* mode, multiple consumers can attach to the same subscription. Messages are delivered in a round robin distribution across consumers, and any given message is delivered to only one consumer. When a consumer disconnects, all the messages that were sent to it and not acknowledged will be rescheduled for sending to the remaining consumers.
+
+In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscribe to the topic, but **Consumer-C-3** and others could as well.
+
+> **Limitations of shared mode**
+> When using shared mode, be aware that:
+> * Message ordering is not guaranteed.
+> * You cannot use cumulative acknowledgment with shared mode.
+
+![Shared subscriptions](/assets/pulsar-shared-subscriptions.png)
+
+### Key_Shared
+
+In *Key_Shared* mode, multiple consumers can attach to the same subscription. Messages are delivered in a distribution across consumers, and messages with the same key or same ordering key are delivered to only one consumer. No matter how many times a message is re-delivered, it is delivered to the same consumer. When a consumer connects or disconnects, the consumer serving some keys of messages changes.
+
+> **Limitations of Key_Shared mode**
+> When you use Key_Shared mode, be aware that:
+> * You need to specify a key or orderingKey for messages.
+> * You cannot use cumulative acknowledgment with Key_Shared mode.
+> * Your producers should disable batching or use a key-based batch builder.
+
+![Key_Shared subscriptions](/assets/pulsar-key-shared-subscriptions.png)
+
+**You can disable Key_Shared subscription in the `broker.conf` file.**
+
+## Multi-topic subscriptions
+
+When a consumer subscribes to a Pulsar topic, by default it subscribes to one specific topic, such as `persistent://public/default/my-topic`. As of Pulsar version 1.23.0-incubating, however, Pulsar consumers can simultaneously subscribe to multiple topics. You can define a list of topics in two ways:
+
+* On the basis of a [**reg**ular **ex**pression](https://en.wikipedia.org/wiki/Regular_expression) (regex), for example `persistent://public/default/finance-.*`
+* By explicitly defining a list of topics
+
+> When subscribing to multiple topics by regex, all topics must be in the same [namespace](#namespaces).
+
+When subscribing to multiple topics, the Pulsar client automatically makes a call to the Pulsar API to discover the topics that match the regex pattern/list, and then subscribe to all of them. If any of the topics do not exist, the consumer auto-subscribes to them once the topics are created.
+
+> **No ordering guarantees across multiple topics**
+> When a producer sends messages to a single topic, all messages are guaranteed to be read from that topic in the same order. However, these guarantees do not hold across multiple topics. So when a producer sends message to multiple topics, the order in which messages are read from those topics is not guaranteed to be the same.
+
+The following are multi-topic subscription examples for Java.
+
+```java
+import java.util.regex.Pattern;
+
+import org.apache.pulsar.client.api.Consumer;
+import org.apache.pulsar.client.api.PulsarClient;
+
+PulsarClient pulsarClient = // Instantiate Pulsar client object
+
+// Subscribe to all topics in a namespace
+Pattern allTopicsInNamespace = Pattern.compile("persistent://public/default/.*");
+Consumer<byte[]> allTopicsConsumer = pulsarClient.newConsumer()
+                .topicsPattern(allTopicsInNamespace)
+                .subscriptionName("subscription-1")
+                .subscribe();
+
+// Subscribe to a subsets of topics in a namespace, based on regex
+Pattern someTopicsInNamespace = Pattern.compile("persistent://public/default/foo.*");
+Consumer<byte[]> someTopicsConsumer = pulsarClient.newConsumer()
+                .topicsPattern(someTopicsInNamespace)
+                .subscriptionName("subscription-1")
+                .subscribe();
+```
+
+For code examples, see [Java](client-libraries-java.md#multi-topic-subscriptions).
+
+## Partitioned topics
+
+Normal topics are served only by a single broker, which limits the maximum throughput of the topic. *Partitioned topics* are a special type of topic that are handled by multiple brokers, thus allowing for higher throughput.
+
+A partitioned topic is actually implemented as N internal topics, where N is the number of partitions. When publishing messages to a partitioned topic, each message is routed to one of several brokers. The distribution of partitions across brokers is handled automatically by Pulsar.
+
+The diagram below illustrates this:
+
+![](/assets/partitioning.png)
+
+The **Topic1** topic has five partitions (**P0** through **P4**) split across three brokers. Because there are more partitions than brokers, two brokers handle two partitions apiece, while the third handles only one (again, Pulsar handles this distribution of partitions automatically).
+
+Messages for this topic are broadcast to two consumers. The [routing mode](#routing-modes) determines which partition each message is published to, while the [subscription mode](#subscription-modes) determines which messages go to which consumers.
+
+Decisions about routing and subscription modes can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
+
+There is no difference between partitioned topics and normal topics in terms of how subscription modes work, as partitioning only determines what happens between when a message is published by a producer and processed and acknowledged by a consumer.
+
+Partitioned topics need to be explicitly created via the [admin API](admin-api-overview.md). The number of partitions can be specified when creating the topic.
+
+### Routing modes
+
+When publishing to partitioned topics, you must specify a *routing mode*. The routing mode determines which partition---that is, which internal topic---each message should be published to.
+
+There are three {@inject: javadoc:MessageRoutingMode:/client/org/apache/pulsar/client/api/MessageRoutingMode} available:
+
+Mode     | Description 
+:--------|:------------
+`RoundRobinPartition` | If no key is provided, the producer will publish messages across all partitions in round-robin fashion to achieve maximum throughput. Please note that round-robin is not done per individual message but rather it's set to the same boundary of batching delay, to ensure batching is effective. While if a key is specified on the message, the partitioned producer will hash the key and assign message to a particular partition. This is the default mode. 
+`SinglePartition`     | If no key is provided, the producer will randomly pick one single partition and publish all the messages into that partition. While if a key is specified on the message, the partitioned producer will hash the key and assign message to a particular partition.
+`CustomPartition`     | Use custom message router implementation that will be called to determine the partition for a particular message. User can create a custom routing mode by using the [Java client](client-libraries-java.md) and implementing the {@inject: javadoc:MessageRouter:/client/org/apache/pulsar/client/api/MessageRouter} interface.
+
+### Ordering guarantee
+
+The ordering of messages is related to the MessageRoutingMode and the message key. Usually, users want a per-key-partition ordering guarantee.
+
+If there is a key attached to message, the messages will be routed to corresponding partitions based on the hashing scheme specified by {@inject: javadoc:HashingScheme:/client/org/apache/pulsar/client/api/HashingScheme} in {@inject: javadoc:ProducerBuilder:/client/org/apache/pulsar/client/api/ProducerBuilder}, when using either `SinglePartition` or `RoundRobinPartition` mode.
+
+Ordering guarantee | Description | Routing Mode and Key
+:------------------|:------------|:------------
+Per-key-partition  | All the messages with the same key will be in order and be placed in same partition. | Use either `SinglePartition` or `RoundRobinPartition` mode, and Key is provided by each message.
+Per-producer       | All the messages from the same producer will be in order. | Use `SinglePartition` mode, and no Key is provided for each message.
+
+### Hashing scheme
+
+{@inject: javadoc:HashingScheme:/client/org/apache/pulsar/client/api/HashingScheme} is an enum that represents the set of standard hashing functions available when choosing the partition to use for a particular message.
+
+There are 2 types of standard hashing functions available: `JavaStringHash` and `Murmur3_32Hash`. 
+The default hashing function for producer is `JavaStringHash`.
+Note that `JavaStringHash` is not useful when producers can be clients written in multiple languages; in this use case, it is recommended to use `Murmur3_32Hash`.
+
+
+
+## Non-persistent topics
+
+
+By default, Pulsar persistently stores *all* unacknowledged messages on multiple [BookKeeper](concepts-architecture-overview.md#persistent-storage) bookies (storage nodes). Data for messages on persistent topics can thus survive broker restarts and subscriber failover.
+
+Pulsar also, however, supports **non-persistent topics**, which are topics on which messages are *never* persisted to disk and live only in memory. When using non-persistent delivery, killing a Pulsar broker or disconnecting a subscriber to a topic means that all in-transit messages are lost on that (non-persistent) topic, meaning that clients may see message loss.
+
+Non-persistent topics have names of this form (note the `non-persistent` in the name):
+
+```http
+non-persistent://tenant/namespace/topic
+```
+
+> For more info on using non-persistent topics, see the [Non-persistent messaging cookbook](cookbooks-non-persistent.md).
+
+In non-persistent topics, brokers immediately deliver messages to all connected subscribers *without persisting them* in [BookKeeper](concepts-architecture-overview.md#persistent-storage). If a subscriber is disconnected, the broker will not be able to deliver those in-transit messages, and subscribers will never be able to receive those messages again. Eliminating the persistent storage step makes messaging on non-persistent topics slightly faster than on persistent topics in some cases [...]
+
+> With non-persistent topics, message data lives only in memory. If a message broker fails or message data can otherwise not be retrieved from memory, your message data may be lost. Use non-persistent topics only if you're *certain* that your use case requires it and can sustain it.
+
+By default, non-persistent topics are enabled on Pulsar brokers. You can disable them in the broker's [configuration](reference-configuration.md#broker-enableNonPersistentTopics). You can manage non-persistent topics using the `pulsar-admin topics` command. For more information, see [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/).
+
+### Performance
+
+Non-persistent messaging is usually faster than persistent messaging because brokers don't persist messages and immediately send acks back to the producer as soon as that message is delivered to connected brokers. Producers thus see comparatively low publish latency with non-persistent topic.
+
+### Client API
+
+Producers and consumers can connect to non-persistent topics in the same way as persistent topics, with the crucial difference that the topic name must start with `non-persistent`. All three subscription modes---[exclusive](#exclusive), [shared](#shared), and [failover](#failover)---are supported for non-persistent topics.
+
+Here's an example [Java consumer](client-libraries-java.md#consumers) for a non-persistent topic:
+
+```java
+PulsarClient client = PulsarClient.builder()
+        .serviceUrl("pulsar://localhost:6650")
+        .build();
+String npTopic = "non-persistent://public/default/my-topic";
+String subscriptionName = "my-subscription-name";
+
+Consumer<byte[]> consumer = client.newConsumer()
+        .topic(npTopic)
+        .subscriptionName(subscriptionName)
+        .subscribe();
+```
+
+Here's an example [Java producer](client-libraries-java.md#producer) for the same non-persistent topic:
+
+```java
+Producer<byte[]> producer = client.newProducer()
+                .topic(npTopic)
+                .create();
+```
+
+## Message retention and expiry
+
+By default, Pulsar message brokers:
+
+* immediately delete *all* messages that have been acknowledged by a consumer, and
+* [persistently store](concepts-architecture-overview.md#persistent-storage) all unacknowledged messages in a message backlog.
+
+Pulsar has two features, however, that enable you to override this default behavior:
+
+* Message **retention** enables you to store messages that have been acknowledged by a consumer
+* Message **expiry** enables you to set a time to live (TTL) for messages that have not yet been acknowledged
+
+> All message retention and expiry is managed at the [namespace](#namespaces) level. For a how-to, see the [Message retention and expiry](cookbooks-retention-expiry.md) cookbook.
+
+The diagram below illustrates both concepts:
+
+![Message retention and expiry](/assets/retention-expiry.png)
+
+With message retention, shown at the top, a <span style={{color: " #89b557"}}>retention policy</span> applied to all topics in a namespace dictates that some messages are durably stored in Pulsar even though they've already been acknowledged. Acknowledged messages that are not covered by the retention policy are <span style={{color: " #bb3b3e"}}>deleted</span>. Without a retention policy, *all* of the <span style={{color: " #19967d"}}>acknowledged messages</span> would be deleted.
+
+With message expiry, shown at the bottom, some messages are <span style={{color: " #bb3b3e"}}>deleted</span>, even though they <span style={{color: " #337db6"}}>haven't been acknowledged</span>, because they've expired according to the <span style={{color: " #e39441"}}>TTL applied to the namespace</span> (for example because a TTL of 5 minutes has been applied and the messages haven't been acknowledged but are 10 minutes old).
+
+## Message deduplication
+
+Message duplication occurs when a message is [persisted](concepts-architecture-overview.md#persistent-storage) by Pulsar more than once. Message deduplication is an optional Pulsar feature that prevents unnecessary message duplication by processing each message only once, even if the message is received more than once.
+
+The following diagram illustrates what happens when message deduplication is disabled vs. enabled:
+
+![Pulsar message deduplication](/assets/message-deduplication.png)
+
+
+Message deduplication is disabled in the scenario shown at the top. Here, a producer publishes message 1 on a topic; the message reaches a Pulsar broker and is [persisted](concepts-architecture-overview.md#persistent-storage) to BookKeeper. The producer then sends message 1 again (in this case due to some retry logic), and the message is received by the broker and stored in BookKeeper again, which means that duplication has occurred.
+
+In the second scenario at the bottom, the producer publishes message 1, which is received by the broker and persisted, as in the first scenario. When the producer attempts to publish the message again, however, the broker knows that it has already seen message 1 and thus does not persist the message.
+
+> Message deduplication is handled at the namespace level or the topic level. For more instructions, see the [message deduplication cookbook](cookbooks-deduplication.md).
+
+
+### Producer idempotency
+
+The other available approach to message deduplication is to ensure that each message is *only produced once*. This approach is typically called **producer idempotency**. The drawback of this approach is that it defers the work of message deduplication to the application. In Pulsar, this is handled at the [broker](reference-terminology.md#broker) level, so you do not need to modify your Pulsar client code. Instead, you only need to make administrative changes. For details, see [Managing m [...]
+
+### Deduplication and effectively-once semantics
+
+Message deduplication makes Pulsar an ideal messaging system to be used in conjunction with stream processing engines (SPEs) and other systems seeking to provide effectively-once processing semantics. Messaging systems that do not offer automatic message deduplication require the SPE or other system to guarantee deduplication, which means that strict message ordering comes at the cost of burdening the application with the responsibility of deduplication. With Pulsar, strict ordering guar [...]
+
+> You can find more in-depth information in [this post](https://www.splunk.com/en_us/blog/it/exactly-once-is-not-exactly-the-same.html).
+
+## Delayed message delivery
+Delayed message delivery enables you to consume a message later rather than immediately. In this mechanism, a message is stored in BookKeeper, `DelayedDeliveryTracker` maintains the time index(time -> messageId) in memory after published to a broker, and it is delivered to a consumer once the specific delayed time is passed.  
+
+Delayed message delivery only works in Shared subscription mode. In Exclusive and Failover subscription modes, the delayed message is dispatched immediately.
+
+The diagram below illustrates the concept of delayed message delivery:
+
+![Delayed Message Delivery](/assets/message_delay.png)
+
+A broker saves a message without any check. When a consumer consumes a message, if the message is set to delay, then the message is added to `DelayedDeliveryTracker`. A subscription checks and gets timeout messages from `DelayedDeliveryTracker`.
+
+### Broker 
+Delayed message delivery is enabled by default. You can change it in the broker configuration file as below:
+
+```
+# Whether to enable the delayed delivery for messages.
+# If disabled, messages are immediately delivered and there is no tracking overhead.
+delayedDeliveryEnabled=true
+
+# Control the ticking time for the retry of delayed message delivery,
+# affecting the accuracy of the delivery time compared to the scheduled time.
+# Default is 1 second.
+delayedDeliveryTickTimeMillis=1000
+```
+
+### Producer 
+The following is an example of delayed message delivery for a producer in Java:
+```java
+// message to be delivered at the configured delay interval
+producer.newMessage().deliverAfter(3L, TimeUnit.MINUTES).value("Hello Pulsar!").send();
+```
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-multi-tenancy.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-multi-tenancy.md
new file mode 100644
index 0000000..15c802f
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-multi-tenancy.md
@@ -0,0 +1,55 @@
+---
+id: concepts-multi-tenancy
+title: Multi Tenancy
+sidebar_label: Multi Tenancy
+original_id: concepts-multi-tenancy
+---
+
+Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview.md) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
+
+The multi-tenant nature of Pulsar is reflected mostly visibly in topic URLs, which have this structure:
+
+```http
+persistent://tenant/namespace/topic
+```
+
+As you can see, the tenant is the most basic unit of categorization for topics (more fundamental than the namespace and topic name).
+
+## Tenants
+
+To each tenant in a Pulsar instance you can assign:
+
+* An [authorization](security-authorization.md) scheme
+* The set of [clusters](reference-terminology.md#cluster) to which the tenant's configuration applies
+
+## Namespaces
+
+Tenants and namespaces are two key concepts of Pulsar to support multi-tenancy.
+
+* Pulsar is provisioned for specified tenants with appropriate capacity allocated to the tenant.
+* A namespace is the administrative unit nomenclature within a tenant. The configuration policies set on a namespace apply to all the topics created in that namespace. A tenant may create multiple namespaces via self-administration using the REST API and the [`pulsar-admin`](reference-pulsar-admin.md) CLI tool. For instance, a tenant with different applications can create a separate namespace for each application.
+
+Names for topics in the same namespace will look like this:
+
+```http
+persistent://tenant/app1/topic-1
+
+persistent://tenant/app1/topic-2
+
+persistent://tenant/app1/topic-3
+```
+
+### Namespace change events and topic-level policies
+
+Pulsar is a multi-tenant event streaming system. Administrators can manage the tenants and namespaces by setting policies at different levels. However, the policies, such as retention policy and storage quota policy, are only available at a namespace level. In many use cases, users need to set a policy at the topic level. The namespace change events approach is proposed for supporting topic-level policies in an efficient way. In this approach, Pulsar is used as an event log to store name [...]
+
+- Avoid using ZooKeeper and introduce more loads to ZooKeeper.
+- Use Pulsar as an event log for propagating the policy cache. It can scale efficiently.
+- Use Pulsar SQL to query the namespace changes and audit the system.
+
+Each namespace has a system topic `__change_events`. This system topic is used for storing change events for a given namespace. The following figure illustrates how to use namespace change events to implement a topic-level policy.
+
+1. Pulsar Admin clients communicate with the Admin Restful API to update topic level policies.
+2. Any broker that receives the Admin HTTP request publishes a topic policy change event to the corresponding `__change_events` topic of the namespace.
+3. Each broker that owns a namespace bundle(s) subscribes to the `__change_events` topic to receive change events of the namespace. It then applies the change events to the policy cache.
+4. Once the policy cache is updated, the broker sends the response back to the Pulsar Admin clients.
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-multiple-advertised-listeners.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-multiple-advertised-listeners.md
new file mode 100644
index 0000000..327a4d6
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-multiple-advertised-listeners.md
@@ -0,0 +1,39 @@
+---
+id: concepts-multiple-advertised-listeners
+title: Multiple advertised listeners
+sidebar_label: Multiple advertised listeners
+original_id: concepts-multiple-advertised-listeners
+---
+
+When a Pulsar cluster is deployed in the production environment, it may require to expose multiple advertised addresses for the broker. For example, when you deploy a Pulsar cluster in Kubernetes and want other clients, which are not in the same Kubernetes cluster, to connect to the Pulsar cluster, you need to assign a broker URL to external clients. But clients in the same Kubernetes cluster can still connect to the Pulsar cluster through the internal network of Kubernetes.
+
+## Advertised listeners
+
+To ensure clients in both internal and external networks can connect to a Pulsar cluster, Pulsar introduces `advertisedListeners` and `internalListenerName` configuration options into the [broker configuration file](reference-configuration.md#broker) to ensure that the broker supports exposing multiple advertised listeners and support the separation of internal and external network traffic.
+
+- The `advertisedListeners` is used to specify multiple advertised listeners. The broker uses the listener as the broker identifier in the load manager and the bundle owner data. The `advertisedListeners` is formatted as `<listener_name>:pulsar://<host>:<port>, <listener_name>:pulsar+ssl://<host>:<port>`. You can set up the `advertisedListeners` like
+`advertisedListeners=internal:pulsar://192.168.1.11:6660,internal:pulsar+ssl://192.168.1.11:6651`.
+
+- The `internalListenerName` is used to specify the internal service URL that the broker uses. You can specify the `internalListenerName` by choosing one of the `advertisedListeners`. The broker uses the listener name of the first advertised listener as the `internalListenerName` if the `internalListenerName` is absent.
+
+After setting up the `advertisedListeners`, clients can choose one of the listeners as the service URL to create a connection to the broker as long as the network is accessible. However, if the client creates producers or consumer on a topic, the client must send a lookup requests to the broker for getting the owner broker, then connect to the owner broker to publish messages or consume messages. Therefore, You must allow the client to get the corresponding service URL with the same adve [...]
+
+## Use multiple advertised listeners
+
+This example shows how a Pulsar client uses multiple advertised listeners.
+
+1. Configure multiple advertised listeners in the broker configuration file.
+
+```shell
+advertisedListeners={listenerName}:pulsar://xxxx:6650,
+{listenerName}:pulsar+ssl://xxxx:6651
+```
+
+2. Specify the listener name for the client.
+
+```java
+PulsarClient client = PulsarClient.builder()
+    .serviceUrl("pulsar://xxxx:6650")
+    .listenerName("external")
+    .build();
+```
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-overview.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-overview.md
new file mode 100644
index 0000000..a37b11f
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-overview.md
@@ -0,0 +1,31 @@
+---
+id: concepts-overview
+title: Pulsar Overview
+sidebar_label: Overview
+original_id: concepts-overview
+---
+
+Pulsar is a multi-tenant, high-performance solution for server-to-server messaging. Pulsar was originally developed by Yahoo, it is under the stewardship of the [Apache Software Foundation](https://www.apache.org/).
+
+Key features of Pulsar are listed below:
+
+* Native support for multiple clusters in a Pulsar instance, with seamless [geo-replication](administration-geo.md) of messages across clusters.
+* Very low publish and end-to-end latency.
+* Seamless scalability to over a million topics.
+* A simple [client API](concepts-clients.md) with bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md) and [C++](client-libraries-cpp.md).
+* Multiple [subscription modes](concepts-messaging.md#subscription-modes) ([exclusive](concepts-messaging.md#exclusive), [shared](concepts-messaging.md#shared), and [failover](concepts-messaging.md#failover)) for topics.
+* Guaranteed message delivery with [persistent message storage](concepts-architecture-overview.md#persistent-storage) provided by [Apache BookKeeper](http://bookkeeper.apache.org/).
+* A serverless light-weight computing framework [Pulsar Functions](functions-overview.md) offers the capability for stream-native data processing.
+* A serverless connector framework [Pulsar IO](io-overview.md), which is built on Pulsar Functions, makes it easier to move data in and out of Apache Pulsar.
+* [Tiered Storage](concepts-tiered-storage.md) offloads data from hot/warm storage to cold/longterm storage (such as S3 and GCS) when the data is aging out.
+
+## Contents
+
+- [Messaging Concepts](concepts-messaging.md)
+- [Architecture Overview](concepts-architecture-overview.md)
+- [Pulsar Clients](concepts-clients.md)
+- [Geo Replication](concepts-replication.md)
+- [Multi Tenancy](concepts-multi-tenancy.md)
+- [Authentication and Authorization](concepts-authentication.md)
+- [Topic Compaction](concepts-topic-compaction.md)
+- [Tiered Storage](concepts-tiered-storage.md)
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-proxy-sni-routing.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-proxy-sni-routing.md
new file mode 100644
index 0000000..ae761f9
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-proxy-sni-routing.md
@@ -0,0 +1,121 @@
+---
+id: concepts-proxy-sni-routing
+title: Proxy support with SNI routing
+sidebar_label: Proxy support with SNI routing
+original_id: concepts-proxy-sni-routing
+---
+
+## Pulsar Proxy with SNI routing
+A proxy server is an intermediary server that forwards requests from multiple clients to different servers across the Internet. The proxy server acts as a "traffic cop" in both forward and reverse proxy scenarios, and provides benefits to your system such as load balancing, improved performance, security, auto-scaling, and so on.
+
+The proxy in Pulsar acts as a reverse proxy, and creates a gateway in front of brokers. Pulsar does not provide its own implementation of proxy servers such as Apache Traffic Server (ATS), HAProxy, Nginx, and Envoy; however, these proxy servers support **SNI routing**. SNI routing is used to route traffic to a destination without terminating the SSL connection. Layer 4 routing provides greater transparency because the outbound connection is determined by examining the destination address in the client TCP packets.
+
+Pulsar clients support [SNI routing protocol](https://github.com/apache/pulsar/wiki/PIP-60:-Support-Proxy-server-with-SNI-routing), so you can connect to brokers through the proxy. This document walks you through how to set up the ATS proxy, enable SNI routing, and connect Pulsar client to the broker through the ATS proxy.
+
+### ATS-SNI Routing in Pulsar
+To support [layer-4 SNI routing](https://docs.trafficserver.apache.org/en/latest/admin-guide/layer-4-routing.en.html) with ATS, the inbound connection must be a TLS connection. Pulsar client supports SNI routing protocol on TLS connection, so when Pulsar clients connect to broker through ATS proxy, Pulsar uses ATS as a reverse proxy.
+
+Pulsar supports SNI routing for geo-replication, so brokers can connect to brokers in other clusters through the ATS proxy.
+
+This section explains how to set up and use ATS as a reverse proxy, so Pulsar clients can connect to brokers through the ATS proxy using the SNI routing protocol on TLS connection. 
+
+#### Set up ATS Proxy for layer-4 SNI routing
+To support layer 4 SNI routing, you need to configure the `records.conf` and `ssl_server_name.conf` files.
+
+![Pulsar client SNI](/assets/pulsar-sni-client.png)
+
+The [records.config](https://docs.trafficserver.apache.org/en/latest/admin-guide/files/records.config.en.html) file is located in the `/usr/local/etc/trafficserver/` directory by default. The file lists configurable variables used by the ATS.
+
+To configure the `records.config` files, complete the following steps.
+1. Update TLS port (`http.server_ports`) on which proxy listens, and update proxy certs (`ssl.client.cert.path` and `ssl.client.cert.filename`) to secure TLS tunneling. 
+2. Configure server ports (`http.connect_ports`) used for tunneling to the broker. If Pulsar brokers are listening on `4443` and `6651` ports, add the brokers service port in the `http.connect_ports` configuration.
+
+The following is an example.
+
+```
+# PROXY TLS PORT
+CONFIG proxy.config.http.server_ports STRING 4443:ssl 4080
+# PROXY CERTS FILE PATH
+CONFIG proxy.config.ssl.client.cert.path STRING /proxy-cert.pem
+# PROXY KEY FILE PATH
+CONFIG proxy.config.ssl.client.cert.filename STRING /proxy-key.pem
+
+
+# The range of origin server ports that can be used for tunneling via CONNECT. # Traffic Server allows tunnels only to the specified ports. Supports both wildcards (*) and ranges (e.g. 0-1023).
+CONFIG proxy.config.http.connect_ports STRING 4443 6651
+```
+
+The [ssl_server_name](https://docs.trafficserver.apache.org/en/8.0.x/admin-guide/files/ssl_server_name.yaml.en.html) file is used to configure TLS connection handling for inbound and outbound connections. The configuration is determined by the SNI values provided by the inbound connection. The file consists of a set of configuration items, and each is identified by an SNI value (`fqdn`). When an inbound TLS connection is made, the SNI value from the TLS negotiation is matched with the it [...]
+
+The following example shows mapping of the inbound SNI hostname coming from the client, and the actual broker service URL where request should be redirected. For example, if the client sends the SNI header `pulsar-broker1`, the proxy creates a TLS tunnel by redirecting request to the `pulsar-broker1:6651` service URL.
+
+```
+server_config = {
+  {
+     fqdn = 'pulsar-broker-vip',
+     # Forward to Pulsar broker which is listening on 6651
+     tunnel_route = 'pulsar-broker-vip:6651'
+  },
+  {
+     fqdn = 'pulsar-broker1',
+     # Forward to Pulsar broker-1 which is listening on 6651
+     tunnel_route = 'pulsar-broker1:6651'
+  },
+  {
+     fqdn = 'pulsar-broker2',
+     # Forward to Pulsar broker-2 which is listening on 6651
+     tunnel_route = 'pulsar-broker2:6651'
+  },
+}
+```
+
+After you configure the `ssl_server_name.conf` and `records.config` files, the ATS-proxy server handles SNI routing and creates a TCP tunnel between the client and the broker.
+
+#### Configure Pulsar-client with SNI routing
+ATS SNI-routing works only with TLS. You need to enable TLS for the ATS proxy and brokers first, configure the SNI routing protocol, and then connect Pulsar clients to brokers through ATS proxy. Pulsar clients support SNI routing by connecting to the proxy, and sending the target broker URL to the SNI header. This process is handled internally. You only need to configure the following proxy configuration initially when you create a Pulsar client to use the SNI routing protocol.
+
+```
+String brokerServiceUrl = "pulsar+ssl://pulsar-broker-vip:6651/";
+String proxyUrl = "pulsar+ssl://ats-proxy:443";
+ClientBuilder clientBuilder = PulsarClient.builder()
+		.serviceUrl(brokerServiceUrl)
+        .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH)
+        .enableTls(true)
+        .allowTlsInsecureConnection(false)
+        .proxyServiceUrl(proxyUrl, ProxyProtocol.SNI)
+        .operationTimeout(1000, TimeUnit.MILLISECONDS);
+
+Map<String, String> authParams = new HashMap<>();
+authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
+authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
+clientBuilder.authentication(AuthenticationTls.class.getName(), authParams);
+
+PulsarClient pulsarClient = clientBuilder.build();
+```
+
+#### Pulsar geo-replication with SNI routing
+You can use the ATS proxy for geo-replication. Pulsar brokers can connect to brokers in geo-replication by using SNI routing. To enable SNI routing for broker connections across clusters, you need to configure the SNI proxy URL in the cluster metadata. If you have configured the SNI proxy URL in the cluster metadata, you can connect to brokers across clusters through the proxy over SNI routing.
+
+![Pulsar client SNI](/assets/pulsar-sni-geo.png)
+
+In this example, a Pulsar cluster is deployed into two separate regions, `us-west` and `us-east`. Both regions are configured with ATS proxy, and brokers in each region run behind the ATS proxy. We configure the cluster metadata for both clusters, so brokers in one cluster can use SNI routing and connect to brokers in other clusters through the ATS proxy.
+
+(a) Configure the cluster metadata for `us-east` with `us-east` broker service URL and `us-east` ATS proxy URL with SNI proxy-protocol.
+
+```
+./pulsar-admin clusters update \
+--broker-url-secure pulsar+ssl://east-broker-vip:6651 \
+--url http://east-broker-vip:8080 \
+--proxy-protocol SNI \
+--proxy-url pulsar+ssl://east-ats-proxy:443
+```
+
+(b) Configure the cluster metadata for `us-west` with `us-west` broker service URL and `us-west` ATS proxy URL with SNI proxy-protocol.
+
+```
+./pulsar-admin clusters update \
+--broker-url-secure pulsar+ssl://west-broker-vip:6651 \
+--url http://west-broker-vip:8080 \
+--proxy-protocol SNI \
+--proxy-url pulsar+ssl://west-ats-proxy:443
+```
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-replication.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-replication.md
new file mode 100644
index 0000000..09f16e4
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-replication.md
@@ -0,0 +1,9 @@
+---
+id: concepts-replication
+title: Geo Replication
+sidebar_label: Geo Replication
+original_id: concepts-replication
+---
+
+Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo.md) in Pulsar enables you to do that.
+
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-tiered-storage.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-tiered-storage.md
new file mode 100644
index 0000000..c00f109
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-tiered-storage.md
@@ -0,0 +1,18 @@
+---
+id: concepts-tiered-storage
+title: Tiered Storage
+sidebar_label: Tiered Storage
+original_id: concepts-tiered-storage
+---
+
+Pulsar's segment oriented architecture allows for topic backlogs to grow very large, effectively without limit. However, this can become expensive over time.
+
+One way to alleviate this cost is to use Tiered Storage. With tiered storage, older messages in the backlog can be moved from BookKeeper to a cheaper storage mechanism, while still allowing clients to access the backlog as if nothing had changed.
+
+![Tiered Storage](/assets/pulsar-tiered-storage.png)
+
+> Data written to BookKeeper is replicated to 3 physical machines by default. However, once a segment is sealed in BookKeeper it becomes immutable and can be copied to long term storage. Long term storage can achieve cost savings by using mechanisms such as [Reed-Solomon error correction](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction) to require fewer physical copies of data.
+
+Pulsar currently supports S3, Google Cloud Storage (GCS), and filesystem for [long term store](https://pulsar.apache.org/docs/en/cookbooks-tiered-storage/). Offloading to long term storage is triggered via a REST API or command line interface. The user passes in the amount of topic data they wish to retain on BookKeeper, and the broker will copy the backlog data to long term storage. The original data will then be deleted from BookKeeper after a configured delay (4 hours by default).
+
+> For a guide for setting up tiered storage, see the [Tiered storage cookbook](cookbooks-tiered-storage.md).
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-topic-compaction.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-topic-compaction.md
new file mode 100644
index 0000000..3aca60f
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-topic-compaction.md
@@ -0,0 +1,37 @@
+---
+id: concepts-topic-compaction
+title: Topic Compaction
+sidebar_label: Topic Compaction
+original_id: concepts-topic-compaction
+---
+
+Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
+
+> For a more practical guide to topic compaction, see the [Topic compaction cookbook](cookbooks-compaction.md).
+
+For some use cases consumers don't need a complete "image" of the topic log. They may only need a few values to construct a more "shallow" image of the log, perhaps even just the most recent value. For these kinds of use cases Pulsar offers **topic compaction**. When you run compaction on a topic, Pulsar goes through a topic's backlog and removes messages that are *obscured* by later messages, i.e. it goes through the topic on a per-key basis and leaves only the most recent message assoc [...]
+
+Pulsar's topic compaction feature:
+
+* Allows for faster "rewind" through topic logs
+* Applies only to [persistent topics](concepts-architecture-overview.md#persistent-storage)
+* Triggered automatically when the backlog reaches a certain size or can be triggered manually via the command line. See the [Topic compaction cookbook](cookbooks-compaction.md)
+* Is conceptually and operationally distinct from [retention and expiry](concepts-messaging.md#message-retention-and-expiry). Topic compaction *does*, however, respect retention. If retention has removed a message from the message backlog of a topic, the message will also not be readable from the compacted topic ledger.
+
+> #### Topic compaction example: the stock ticker
+> An example use case for a compacted Pulsar topic would be a stock ticker topic. On a stock ticker topic, each message bears a timestamped dollar value for stocks for purchase (with the message key holding the stock symbol, e.g. `AAPL` or `GOOG`). With a stock ticker you may care only about the most recent value(s) of the stock and have no interest in historical data (i.e. you don't need to construct a complete image of the topic's sequence of messages per key). Compaction would be high [...]
+
+
+## How topic compaction works
+
+When topic compaction is triggered [via the CLI](cookbooks-compaction.md), Pulsar will iterate over the entire topic from beginning to end. For each key that it encounters the compaction routine will keep a record of the latest occurrence of that key.
+
+After that, the broker will create a new [BookKeeper ledger](concepts-architecture-overview.md#ledgers) and make a second iteration through each message on the topic. For each message, if the key matches the latest occurrence of that key, then the key's data payload, message ID, and metadata will be written to the newly created ledger. If the key doesn't match the latest then the message will be skipped and left alone. If any given message has an empty payload, it will be skipped and con [...]
+
+After the initial compaction operation, the Pulsar [broker](reference-terminology.md#broker) that owns the topic is notified whenever any future changes are made to the compaction horizon and compacted backlog. When such changes occur:
+
+* Clients (consumers and readers) that have read compacted enabled will attempt to read messages from a topic and either:
+  * Read from the topic like normal (if the message ID is greater than or equal to the compaction horizon) or
+  * Read beginning at the compaction horizon (if the message ID is lower than the compaction horizon)
+
+
diff --git a/site2/website-next/versioned_docs/version-2.7.3/concepts-transactions.md b/site2/website-next/versioned_docs/version-2.7.3/concepts-transactions.md
new file mode 100644
index 0000000..12a32e8
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.7.3/concepts-transactions.md
@@ -0,0 +1,30 @@
+---
+id: transactions
+title: Transactions
+sidebar_label: Overview
+original_id: transactions
+---
+
+Transactional semantics enable event streaming applications to consume, process, and produce messages in one atomic operation. In Pulsar, a producer or consumer can work with messages across multiple topics and partitions and ensure those messages are processed as a single unit. 
+
+The following concepts help you understand Pulsar transactions.
+
+## Transaction coordinator and transaction log
+The transaction coordinator maintains the topics and subscriptions that interact in a transaction. When a transaction is committed, the transaction coordinator interacts with the topic owner broker to complete the transaction.
+
+The transaction coordinator maintains the entire life cycle of transactions, and prevents a transaction from entering an incorrect status.
+
+The transaction coordinator handles transaction timeout, and ensures that the transaction is aborted after a transaction timeout.
+
+All the transaction metadata is persisted in the transaction log. The transaction log is backed by a Pulsar topic. After the transaction coordinator crashes, it can restore the transaction metadata from the transaction log.
+
+## Transaction ID
+The transaction ID (TxnID) identifies a unique transaction in Pulsar. The transaction ID is 128-bit. The highest 16 bits are reserved for the ID of the transaction coordinator, and the remaining bits are used for monotonically increasing numbers in each transaction coordinator. The TxnID makes it easy to locate the transaction that has crashed.
+
+## Transaction buffer
+Messages produced within a transaction are stored in the transaction buffer. The messages in transaction buffer are not materialized (visible) to consumers until the transactions are committed. The messages in the transaction buffer are discarded when the transactions are aborted. 
+
+## Pending acknowledge state
+Message acknowledgments within a transaction are maintained in the pending acknowledge state before the transaction completes. If a message is in the pending acknowledge state, the message cannot be acknowledged by other transactions until the message is removed from the pending acknowledge state.
+
+The pending acknowledge state is persisted to the pending acknowledge log. The pending acknowledge log is backed by a Pulsar topic. A new broker can restore the state from the pending acknowledge log to ensure the acknowledgement is not lost.
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-architecture-overview.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-architecture-overview.md
new file mode 100644
index 0000000..7db944e
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-architecture-overview.md
@@ -0,0 +1,162 @@
+---
+id: concepts-architecture-overview
+title: Architecture Overview
+sidebar_label: Architecture
+original_id: concepts-architecture-overview
+---
+
+At the highest level, a Pulsar instance is composed of one or more Pulsar clusters. Clusters within an instance can [replicate](concepts-replication.md) data amongst themselves.
+
+In a Pulsar cluster:
+
+* One or more brokers handle and load balance incoming messages from producers, dispatch messages to consumers, communicate with the Pulsar configuration store to handle various coordination tasks, store messages in BookKeeper instances (aka bookies), rely on a cluster-specific ZooKeeper cluster for certain tasks, and more.
+* A BookKeeper cluster consisting of one or more bookies handles [persistent storage](#persistent-storage) of messages.
+* A ZooKeeper cluster specific to that cluster handles coordination tasks between Pulsar clusters.
+
+The diagram below provides an illustration of a Pulsar cluster:
+
+![Pulsar architecture diagram](/assets/pulsar-system-architecture.png)
+
+At the broader instance level, an instance-wide ZooKeeper cluster called the configuration store handles coordination tasks involving multiple clusters, for example [geo-replication](concepts-replication.md).
+
+## Brokers
+
+The Pulsar message broker is a stateless component that's primarily responsible for running two other components:
+
+* An HTTP server that exposes a {@inject: rest:REST:/} API for both administrative tasks and [topic lookup](concepts-clients.md#client-setup-phase) for producers and consumers. The producers connect to the brokers to publish messages and the consumers connect to the brokers to consume the messages.
+* A dispatcher, which is an asynchronous TCP server over a custom [binary protocol](developing-binary-protocol.md) used for all data transfers
+
+Messages are typically dispatched out of a [managed ledger](#managed-ledgers) cache for the sake of performance, *unless* the backlog exceeds the cache size. If the backlog grows too large for the cache, the broker will start reading entries from BookKeeper.
+
+Finally, to support geo-replication on global topics, the broker manages replicators that tail the entries published in the local region and republish them to the remote region using the Pulsar [Java client library](client-libraries-java.md).
+
+> For a guide to managing Pulsar brokers, see the [brokers](admin-api-brokers.md) guide.
+
+## Clusters
+
+A Pulsar instance consists of one or more Pulsar *clusters*. Clusters, in turn, consist of:
+
+* One or more Pulsar [brokers](#brokers)
+* A ZooKeeper quorum used for cluster-level configuration and coordination
+* An ensemble of bookies used for [persistent storage](#persistent-storage) of messages
+
+Clusters can replicate amongst themselves using [geo-replication](concepts-replication.md).
+
+> For a guide to managing Pulsar clusters, see the [clusters](admin-api-clusters.md) guide.
+
+## Metadata store
+
+The Pulsar metadata store maintains all the metadata of a Pulsar cluster, such as topic metadata, schema, broker load data, and so on. Pulsar uses [Apache ZooKeeper](https://zookeeper.apache.org/) for metadata storage, cluster configuration, and coordination. The Pulsar metadata store can be deployed on a separate ZooKeeper cluster or deployed on an existing ZooKeeper cluster. You can use one ZooKeeper cluster for both Pulsar metadata store and [BookKeeper metadata store](https://bookkee [...]
+
+In a Pulsar instance:
+
+* A configuration store quorum stores configuration for tenants, namespaces, and other entities that need to be globally consistent.
+* Each cluster has its own local ZooKeeper ensemble that stores cluster-specific configuration and coordination such as which brokers are responsible for which topics as well as ownership metadata, broker load reports, BookKeeper ledger metadata, and more.
+
+## Configuration store
+
+The configuration store maintains all the configurations of a Pulsar instance, such as clusters, tenants, namespaces, partitioned topic related configurations, and so on. A Pulsar instance can have a single local cluster, multiple local clusters, or multiple cross-region clusters. Consequently, the configuration store can share the configurations across multiple clusters under a Pulsar instance. The configuration store can be deployed on a separate ZooKeeper cluster or deployed on an exi [...]
+
+## Persistent storage
+
+Pulsar provides guaranteed message delivery for applications. If a message successfully reaches a Pulsar broker, it will be delivered to its intended target.
+
+This guarantee requires that non-acknowledged messages are stored in a durable manner until they can be delivered to and acknowledged by consumers. This mode of messaging is commonly called *persistent messaging*. In Pulsar, N copies of all messages are stored and synced on disk, for example 4 copies across two servers with mirrored [RAID](https://en.wikipedia.org/wiki/RAID) volumes on each server.
+
+### Apache BookKeeper
+
+Pulsar uses a system called [Apache BookKeeper](http://bookkeeper.apache.org/) for persistent message storage. BookKeeper is a distributed [write-ahead log](https://en.wikipedia.org/wiki/Write-ahead_logging) (WAL) system that provides a number of crucial advantages for Pulsar:
+
+* It enables Pulsar to utilize many independent logs, called [ledgers](#ledgers). Multiple ledgers can be created for topics over time.
+* It offers very efficient storage for sequential data that handles entry replication.
+* It guarantees read consistency of ledgers in the presence of various system failures.
+* It offers even distribution of I/O across bookies.
+* It's horizontally scalable in both capacity and throughput. Capacity can be immediately increased by adding more bookies to a cluster.
+* Bookies are designed to handle thousands of ledgers with concurrent reads and writes. By using multiple disk devices---one for journal and another for general storage---bookies are able to isolate the effects of read operations from the latency of ongoing write operations.
+
+In addition to message data, *cursors* are also persistently stored in BookKeeper. Cursors are [subscription](reference-terminology.md#subscription) positions for [consumers](reference-terminology.md#consumer). BookKeeper enables Pulsar to store consumer position in a scalable fashion.
+
+At the moment, Pulsar supports persistent message storage. This accounts for the `persistent` in all topic names. Here's an example:
+
+```http
+persistent://my-tenant/my-namespace/my-topic
+```
+
+> Pulsar also supports ephemeral ([non-persistent](concepts-messaging.md#non-persistent-topics)) message storage.
+
+
+You can see an illustration of how brokers and bookies interact in the diagram below:
+
+![Brokers and bookies](/assets/broker-bookie.png)
+
+
+### Ledgers
+
+A ledger is an append-only data structure with a single writer that is assigned to multiple BookKeeper storage nodes, or bookies. Ledger entries are replicated to multiple bookies. Ledgers themselves have very simple semantics:
+
+* A Pulsar broker can create a ledger, append entries to the ledger, and close the ledger.
+* After the ledger has been closed---either explicitly or because the writer process crashed---it can then be opened only in read-only mode.
+* Finally, when entries in the ledger are no longer needed, the whole ledger can be deleted from the system (across all bookies).
+
+#### Ledger read consistency
+
+The main strength of Bookkeeper is that it guarantees read consistency in ledgers in the presence of failures. Since the ledger can only be written to by a single process, that process is free to append entries very efficiently, without need to obtain consensus. After a failure, the ledger will go through a recovery process that will finalize the state of the ledger and establish which entry was last committed to the log. After that point, all readers of the ledger are guaranteed to see  [...]
+
+#### Managed ledgers
+
+Given that Bookkeeper ledgers provide a single log abstraction, a library was developed on top of the ledger called the *managed ledger* that represents the storage layer for a single topic. A managed ledger represents the abstraction of a stream of messages with a single writer that keeps appending at the end of the stream and multiple cursors that are consuming the stream, each with its own associated position.
+
+Internally, a single managed ledger uses multiple BookKeeper ledgers to store the data. There are two reasons to have multiple ledgers:
+
+1. After a failure, a ledger is no longer writable and a new one needs to be created.
+2. A ledger can be deleted when all cursors have consumed the messages it contains. This allows for periodic rollover of ledgers.
+
+### Journal storage
+
+In BookKeeper, *journal* files contain BookKeeper transaction logs. Before making an update to a [ledger](#ledgers), a bookie needs to ensure that a transaction describing the update is written to persistent (non-volatile) storage. A new journal file is created once the bookie starts or the older journal file reaches the journal file size threshold (configured using the [`journalMaxSizeMB`](reference-configuration.md#bookkeeper-journalMaxSizeMB) parameter).
+
+## Pulsar proxy
+
+One way for Pulsar clients to interact with a Pulsar [cluster](#clusters) is by connecting to Pulsar message [brokers](#brokers) directly. In some cases, however, this kind of direct connection is either infeasible or undesirable because the client doesn't have direct access to broker addresses. If you're running Pulsar in a cloud environment or on [Kubernetes](https://kubernetes.io) or an analogous platform, for example, then direct client connections to brokers are likely not possible.
+
+The **Pulsar proxy** provides a solution to this problem by acting as a single gateway for all of the brokers in a cluster. If you run the Pulsar proxy (which, again, is optional), all client connections with the Pulsar cluster will flow through the proxy rather than communicating with brokers.
+
+> For the sake of performance and fault tolerance, you can run as many instances of the Pulsar proxy as you'd like.
+
+Architecturally, the Pulsar proxy gets all the information it requires from ZooKeeper. When starting the proxy on a machine, you only need to provide ZooKeeper connection strings for the cluster-specific and instance-wide configuration store clusters. Here's an example:
+
+```bash
+$ bin/pulsar proxy \
+  --zookeeper-servers zk-0,zk-1,zk-2 \
+  --configuration-store-servers zk-0,zk-1,zk-2
+```
+
+> #### Pulsar proxy docs
+> For documentation on using the Pulsar proxy, see the [Pulsar proxy admin documentation](administration-proxy.md).
+
+
+Some important things to know about the Pulsar proxy:
+
+* Connecting clients don't need to provide *any* specific configuration to use the Pulsar proxy. You won't need to update the client configuration for existing applications beyond updating the IP used for the service URL (for example if you're running a load balancer over the Pulsar proxy).
+* [TLS encryption](security-tls-transport.md) and [authentication](security-tls-authentication.md) are supported by the Pulsar proxy.
+
+## Service discovery
+
+[Clients](getting-started-clients.md) connecting to Pulsar brokers need to be able to communicate with an entire Pulsar instance using a single URL. Pulsar provides a built-in service discovery mechanism that you can set up using the instructions in the [Deploying a Pulsar instance](deploy-bare-metal.md#service-discovery-setup) guide.
+
+You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired cluster, whether via DNS, an HTTP or IP redirect, or some other means.
+
+The diagram below illustrates Pulsar service discovery:
+
+![alt-text](/assets/pulsar-service-discovery.png)
+
+In this diagram, the Pulsar cluster is addressable via a single DNS name: `pulsar-cluster.acme.com`. A [Python client](client-libraries-python.md), for example, could access this Pulsar cluster like this:
+
+```python
+from pulsar import Client
+
+client = Client('pulsar://pulsar-cluster.acme.com:6650')
+```
+
+> **Note**
+> In Pulsar, each topic is handled by only one broker. Initial requests from a client to read, update or delete a topic are sent to a broker that may not be the topic owner. If the broker cannot handle the request for this topic, it redirects the request to the appropriate broker.
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-authentication.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-authentication.md
new file mode 100644
index 0000000..781d068
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-authentication.md
@@ -0,0 +1,9 @@
+---
+id: concepts-authentication
+title: Authentication and Authorization
+sidebar_label: Authentication and Authorization
+original_id: concepts-authentication
+---
+
+Pulsar supports a pluggable [authentication](security-overview.md) mechanism which can be configured at the proxy and/or the broker. Pulsar also supports a pluggable [authorization](security-authorization.md) mechanism. These mechanisms work together to identify the client and its access rights on topics, namespaces and tenants.
+
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-clients.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-clients.md
new file mode 100644
index 0000000..1ce2865
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-clients.md
@@ -0,0 +1,85 @@
+---
+id: concepts-clients
+title: Pulsar Clients
+sidebar_label: Clients
+original_id: concepts-clients
+---
+
+Pulsar exposes a client API with language bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md), [C++](client-libraries-cpp.md) and [C#](client-libraries-dotnet.md). The client API optimizes and encapsulates Pulsar's client-broker communication protocol and exposes a simple and intuitive API for use by applications.
+
+Under the hood, the current official Pulsar client libraries support transparent reconnection and/or connection failover to brokers, queuing of messages until acknowledged by the broker, and heuristics such as connection retries with backoff.
+
+> **Custom client libraries**
+> If you'd like to create your own client library, we recommend consulting the documentation on Pulsar's custom [binary protocol](developing-binary-protocol.md).
+
+
+## Client setup phase
+
+Before an application creates a producer/consumer, the Pulsar client library needs to initiate a setup phase including two steps:
+
+1. The client attempts to determine the owner of the topic by sending an HTTP lookup request to the broker. The request could reach one of the active brokers which, by looking at the (cached) zookeeper metadata knows who is serving the topic or, in case nobody is serving it, tries to assign it to the least loaded broker.
+1. Once the client library has the broker address, it creates a TCP connection (or reuses an existing connection from the pool) and authenticates it. Within this connection, the client and broker exchange binary commands from a custom protocol. At this point the client sends a command to create a producer/consumer to the broker, which will comply after having validated the authorization policy.
+
+Whenever the TCP connection breaks, the client immediately re-initiates this setup phase and keeps trying with exponential backoff to re-establish the producer or consumer until the operation succeeds.
+
+## Reader interface
+
+In Pulsar, the "standard" [consumer interface](concepts-messaging.md#consumers) involves using consumers to listen on [topics](reference-terminology.md#topic), process incoming messages, and finally acknowledge those messages when they are processed. Whenever a new subscription is created, it is initially positioned at the end of the topic (by default), and consumers associated with that subscription begin reading with the first message created afterwards.  Whenever a consumer connects t [...]
+
+The **reader interface** for Pulsar enables applications to manually manage cursors. When you use a reader to connect to a topic---rather than a consumer---you need to specify *which* message the reader begins reading from when it connects to a topic. When connecting to a topic, the reader interface enables you to begin with:
+
+* The **earliest** available message in the topic
+* The **latest** available message in the topic
+* Some other message between the earliest and the latest. If you select this option, you'll need to explicitly provide a message ID. Your application will be responsible for "knowing" this message ID in advance, perhaps fetching it from a persistent data store or cache.
+
+The reader interface is helpful for use cases like using Pulsar to provide effectively-once processing semantics for a stream processing system. For this use case, it's essential that the stream processing system be able to "rewind" topics to a specific message and begin reading there. The reader interface provides Pulsar clients with the low-level abstraction necessary to "manually position" themselves within a topic.
+
+Internally, the reader interface is implemented as a consumer using an exclusive, non-durable subscription to the topic with a randomly-allocated name.
+
+[ **IMPORTANT** ]
+
+Unlike subscription/consumer, readers are non-durable in nature and does not prevent data in a topic from being deleted, thus it is ***strongly*** advised that [data retention](cookbooks-retention-expiry.md) be configured. If data retention for a topic is not configured for an adequate amount of time, messages that the reader has not yet read might be deleted .  This causes the readers to essentially skip messages. Configuring the data retention for a topic guarantees the reader with a c [...]
+
+Please also note that a reader can have a "backlog", but the metric is only used for users to know how far behind the reader is. The metric is not considered for any backlog quota calculations.
+
+![The Pulsar consumer and reader interfaces](/assets/pulsar-reader-consumer-interfaces.png)
+
+Here's a Java example that begins reading from the earliest available message on a topic:
+
+```java
+import org.apache.pulsar.client.api.Message;
+import org.apache.pulsar.client.api.MessageId;
+import org.apache.pulsar.client.api.Reader;
+
+// Create a reader on a topic and for a specific message (and onward)
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic("reader-api-test")
+    .startMessageId(MessageId.earliest)
+    .create();
+
+while (true) {
+    Message message = reader.readNext();
+
+    // Process the message
+}
+```
+
+To create a reader that reads from the latest available message:
+
+```java
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic(topic)
+    .startMessageId(MessageId.latest)
+    .create();
+```
+
+To create a reader that reads from some message between the earliest and the latest:
+
+```java
+byte[] msgIdBytes = // Some byte array
+MessageId id = MessageId.fromByteArray(msgIdBytes);
+Reader<byte[]> reader = pulsarClient.newReader()
+    .topic(topic)
+    .startMessageId(id)
+    .create();
+```
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-messaging.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-messaging.md
new file mode 100644
index 0000000..f2a394a
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-messaging.md
@@ -0,0 +1,555 @@
+---
+id: concepts-messaging
+title: Messaging
+sidebar_label: Messaging
+original_id: concepts-messaging
+---
+
+Pulsar is built on the [publish-subscribe](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) pattern (often abbreviated to pub-sub). In this pattern, [producers](#producers) publish messages to [topics](#topics). [Consumers](#consumers) [subscribe](#subscription-modes) to those topics, process incoming messages, and send an acknowledgement when processing is complete.
+
+When a subscription is created, Pulsar [retains](concepts-architecture-overview.md#persistent-storage) all messages, even if the consumer is disconnected. Retained messages are discarded only when a consumer acknowledges that those messages are processed successfully.
+
+## Messages
+
+Messages are the basic "unit" of Pulsar. The following table lists the components of messages.
+
+Component | Description
+:---------|:-------
+Value / data payload | The data carried by the message. All Pulsar messages contain raw bytes, although message data can also conform to data [schemas](schema-get-started.md).
+Key | Messages are optionally tagged with keys, which is useful for things like [topic compaction](concepts-topic-compaction.md).
+Properties | An optional key/value map of user-defined properties.
+Producer name | The name of the producer that produces the message. If you do not specify a producer name, the default name is used.
+Sequence ID | Each Pulsar message belongs to an ordered sequence on its topic. The sequence ID of the message is its order in that sequence.
+Publish time | The timestamp of when the message is published. The timestamp is automatically applied by the producer.
+Event time | An optional timestamp attached to a message by applications. For example, applications attach a timestamp on when the message is processed. If nothing is set to event time, the value is `0`. 
+TypedMessageBuilder | It is used to construct a message. You can set message properties such as the message key, message value with `TypedMessageBuilder`. <br /> When you set `TypedMessageBuilder`, set the key as a string. If you set the key as other types, for example, an AVRO object, the key is sent as bytes, and it is difficult to get the AVRO object back on the consumer.
+
+The default size of a message is 5 MB. You can configure the max size of a message with the following configurations.
+
+- In the `broker.conf` file.
+
+    ```bash
+    # The max size of a message (in bytes).
+    maxMessageSize=5242880
+    ```
+
+- In the `bookkeeper.conf` file.
+
+    ```bash
+    # The max size of the netty frame (in bytes). Any messages received larger than this value are rejected. The default value is 5 MB.
+    nettyMaxFrameSizeBytes=5253120
+    ```
+> For more information on Pulsar message contents, see Pulsar [binary protocol](developing-binary-protocol.md).
+
+## Producers
+
+A producer is a process that attaches to a topic and publishes messages to a Pulsar [broker](reference-terminology.md#broker). The Pulsar broker processes the messages.
+
+### Send modes
+
+Producers send messages to brokers synchronously (sync) or asynchronously (async).
+
+| Mode       | Description |            
+|:-----------|-----------|
+| Sync send  | The producer waits for an acknowledgement from the broker after sending every message. If the acknowledgment is not received, the producer treats the sending operation as a failure.                                                                                                                                                                                    |
+| Async send | The producer puts a message in a blocking queue and returns immediately. The client library sends the message to the broker in the background. If the queue is full (you can [configure](reference-configuration.md#broker) the maximum size), the producer is blocked or fails immediately when calling the API, depending on arguments passed to the producer. |
+
+### Access mode
+
+You can have different types of access modes on topics for producers.
+
+|Access mode | Description
+|---|---
+`Shared`|Multiple producers can publish on a topic. <br /><br />This is the **default** setting.
+`Exclusive`|Only one producer can publish on a topic. <br /><br />If there is already a producer connected, other producers trying to publish on this topic get errors immediately.<br /><br />The “old” producer is evicted and a “new” producer is selected to be the next exclusive producer if the “old” producer experiences a network partition with the broker.
+`WaitForExclusive`|If there is already a producer connected, the producer creation is pending (rather than timing out) until the producer gets the `Exclusive` access.<br /><br />The producer that succeeds in becoming the exclusive one is treated as the leader. Consequently, if you want to implement the leader election scheme for your application, you can use this access mode.
+
+> **Note**
+>
+> Once an application creates a producer with the `Exclusive` or `WaitForExclusive` access mode successfully, the instance of the application is guaranteed to be the **only one writer** on the topic. Other producers trying to produce on this topic get errors immediately or have to wait until they get the `Exclusive` access. 
+> 
+> For more information, see [PIP 68: Exclusive Producer](https://github.com/apache/pulsar/wiki/PIP-68:-Exclusive-Producer).
+
+You can set producer access mode through Java Client API. For more information, see `ProducerAccessMode` in [ProducerBuilder.java](https://github.com/apache/pulsar/blob/fc5768ca3bbf92815d142fe30e6bfad70a1b4fc6/pulsar-client-api/src/main/java/org/apache/pulsar/client/api/ProducerBuilder.java).
+
+
+### Compression
+
+You can compress messages published by producers during transportation. Pulsar currently supports the following types of compression:
+
+* [LZ4](https://github.com/lz4/lz4)
+* [ZLIB](https://zlib.net/)
+* [ZSTD](https://facebook.github.io/zstd/)
+* [SNAPPY](https://google.github.io/snappy/)
+
+### Batching
+
+When batching is enabled, the producer accumulates and sends a batch of messages in a single request. The batch size is defined by the maximum number of messages and the maximum publish latency. Therefore, the backlog size represents the total number of batches instead of the total number of messages.
+
+In Pulsar, batches are tracked and stored as single units rather than as individual messages. The consumer unbundles a batch into individual messages. However, scheduled messages (configured through the `deliverAt` or the `deliverAfter` parameter) are always sent as individual messages even when batching is enabled.
+
+In general, a batch is acknowledged when all of its messages are acknowledged by a consumer. It means unexpected failures, negative acknowledgements, or acknowledgement timeouts can result in redelivery of all messages in a batch, even if some of the messages are acknowledged.
+
+To avoid redelivering acknowledged messages in a batch to the consumer, Pulsar introduces batch index acknowledgement since Pulsar 2.6.0. When batch index acknowledgement is enabled, the consumer filters out the batch index that has been acknowledged and sends the batch index acknowledgement request to the broker. The broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer [...]
+
+By default, batch index acknowledgement is disabled (`acknowledgmentAtBatchIndexLevelEnabled=false`). You can enable batch index acknowledgement by setting the `acknowledgmentAtBatchIndexLevelEnabled` parameter to `true` at the broker side. Enabling batch index acknowledgement results in more memory overheads. 
+
+### Chunking
+When you enable chunking, read the following instructions.
+- Batching and chunking cannot be enabled simultaneously. To enable chunking, you must disable batching in advance.
+- Chunking is only supported for persisted topics.
+- Chunking is only supported for the exclusive and failover subscription modes.
+
+When chunking is enabled (`chunkingEnabled=true`), if the message size is greater than the allowed maximum publish-payload size, the producer splits the original message into chunked messages and publishes them with chunked metadata to the broker separately and in order. At the broker side, the chunked messages are stored in the managed-ledger in the same way as that of ordinary messages. The only difference is that the consumer needs to buffer the chunked messages and combines them into [...]
+
+The consumer consumes the chunked messages and buffers them until the consumer receives all the chunks of a message. And then the consumer stitches chunked messages together and places them into the receiver-queue. Clients consume messages from the receiver-queue. Once the consumer consumes the entire large message and acknowledges it, the consumer internally sends acknowledgement of all the chunk messages associated to that large message. You can set the `maxPendingChunkedMessage` param [...]
+
+The broker does not require any changes to support chunking for non-shared subscription. The broker only uses `chunkedMessageRate` to record chunked message rate on the topic.
+
+#### Handle chunked messages with one producer and one ordered consumer
+
+As shown in the following figure, when a topic has one producer which publishes large message payload in chunked messages along with regular non-chunked messages. The producer publishes message M1 in three chunks M1-C1, M1-C2 and M1-C3. The broker stores all the three chunked messages in the managed-ledger and dispatches to the ordered (exclusive/failover) consumer in the same order. The consumer buffers all the chunked messages in memory until it receives all the chunked messages, combi [...]
+
+![](/assets/chunking-01.png)
+
+#### Handle chunked messages with multiple producers and one ordered consumer
+
+When multiple publishers publish chunked messages into a single topic, the broker stores all the chunked messages coming from different publishers in the same managed-ledger. As shown below, Producer 1 publishes message M1 in three chunks M1-C1, M1-C2 and M1-C3. Producer 2 publishes message M2 in three chunks M2-C1, M2-C2 and M2-C3. All chunked messages of the specific message are still in order but might not be consecutive in the managed-ledger. This brings some memory pressure to the c [...]
+
+![](/assets/chunking-02.png)
+
+## Consumers
+
+A consumer is a process that attaches to a topic via a subscription and then receives messages.
+
+A consumer sends a [flow permit request](developing-binary-protocol.md#flow-control) to a broker to get messages. There is a queue at the consumer side to receive messages pushed from the broker. You can configure the queue size with the [`receiverQueueSize`](client-libraries-java.md#configure-consumer) parameter. The default size is `1000`. Each time `consumer.receive()` is called, a message is dequeued from the buffer.
+
+### Receive modes
+
+Messages are received from [brokers](reference-terminology.md#broker) either synchronously (sync) or asynchronously (async).
+
+| Mode          | Description                                                                                                                                                                                                   |
+|:--------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Sync receive  | A sync receive is blocked until a message is available.                                                                                                                                                  |
+| Async receive | An async receive returns immediately with a future value—for example, a [`CompletableFuture`](http://www.baeldung.com/java-completablefuture) in Java—that completes once a new message is available. |
+
+### Listeners
+
+Client libraries provide listener implementation for consumers. For example, the [Java client](client-libraries-java.md) provides a {@inject: javadoc:MessageListener:/client/org/apache/pulsar/client/api/MessageListener} interface. In this interface, the `received` method is called whenever a new message is received.
+
+### Acknowledgement
+
+When a consumer consumes a message successfully, the consumer sends an acknowledgement request to the broker. This message is permanently stored, and then deleted only after all the subscriptions have acknowledged it. If you want to store the message that has been acknowledged by a consumer, you need to configure the [message retention policy](concepts-messaging.md#message-retention-and-expiry).
+
+For a batch message, if batch index acknowledgement is enabled, the broker maintains the batch index acknowledgement status and tracks the acknowledgement status of each batch index to avoid dispatching acknowledged messages to the consumer. When all indexes of the batch message are acknowledged, the batch message is deleted. For details about the batch index acknowledgement, see [batching](#batching).
+
+Messages can be acknowledged in the following two ways:
+
+- Messages are acknowledged individually. With individual acknowledgement, the consumer needs to acknowledge each message and sends an acknowledgement request to the broker.
+- Messages are acknowledged cumulatively. With cumulative acknowledgement, the consumer only needs to acknowledge the last message it received. All messages in the stream up to (and including) the provided message are not re-delivered to that consumer.
+
+> **Note**
+> Cumulative acknowledgement cannot be used in the [shared subscription mode](#subscription-modes), because the shared subscription mode involves multiple consumers who have access to the same subscription. In the shared subscription mode, messages are acknowledged individually.
+
+### Negative acknowledgement
+
+When a consumer fails to consume a message successfully and wants to consume it again, the consumer sends a negative acknowledgement to the broker, and then the broker redelivers the message.
+
+Messages are negatively acknowledged one by one or cumulatively, which depends on the consumption subscription mode.
+
+In the exclusive and failover subscription modes, consumers only negatively acknowledge the last message they receive.
+
+In the shared and Key_Shared subscription modes, you can negatively acknowledge messages individually.
+
+Be aware that negative acknowledgment on ordered subscription types, such as Exclusive, Failover and Key_Shared, can cause failed messages to arrive at consumers out of the original order.
+
+> **Note**
+> If batching is enabled, other messages and the negatively acknowledged messages in the same batch are redelivered to the consumer.
+
+### Acknowledgement timeout
+
+If a message is not consumed successfully, and you want to trigger the broker to redeliver the message automatically, you can adopt the unacknowledged message automatic re-delivery mechanism. The client tracks the unacknowledged messages within the entire `acktimeout` time range, and sends a `redeliver unacknowledged messages` request to the broker automatically when the acknowledgement timeout is specified.
+
+> **Note**
+> If batching is enabled, other messages and the unacknowledged messages in the same batch are redelivered to the consumer.
+
+> **Note**    
+> Prefer negative acknowledgements over acknowledgement timeout. Negative acknowledgement controls the re-delivery of individual messages with more precision, and avoids invalid redeliveries when the message processing time exceeds the acknowledgement timeout.
+
+### Dead letter topic
+
+Dead letter topic enables you to consume new messages when some messages cannot be consumed successfully by a consumer. In this mechanism, messages that fail to be consumed are stored in a separate topic, which is called dead letter topic. You can decide how to handle messages in the dead letter topic.
+
+The following example shows how to enable dead letter topic in a Java client using the default dead letter topic:
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+              .topic(topic)
+              .subscriptionName("my-subscription")
+              .subscriptionType(SubscriptionType.Shared)
+              .deadLetterPolicy(DeadLetterPolicy.builder()
+                    .maxRedeliverCount(maxRedeliveryCount)
+                    .build())
+              .subscribe();
+                
+```
+The default dead letter topic uses this format: 
+```
+<topicname>-<subscriptionname>-DLQ
+```
+  
+If you want to specify the name of the dead letter topic, use this Java client example:
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+              .topic(topic)
+              .subscriptionName("my-subscription")
+              .subscriptionType(SubscriptionType.Shared)
+              .deadLetterPolicy(DeadLetterPolicy.builder()
+                    .maxRedeliverCount(maxRedeliveryCount)
+                    .deadLetterTopic("your-topic-name")
+                    .build())
+              .subscribe();
+                
+```
+  
+Dead letter topic depends on message re-delivery. Messages are redelivered either due to [acknowledgement timeout](#acknowledgement-timeout) or [negative acknowledgement](#negative-acknowledgement). If you are going to use negative acknowledgement on a message, make sure it is negatively acknowledged before the acknowledgement timeout. 
+
+> **Note**    
+> Currently, dead letter topic is enabled in the Shared and Key_Shared subscription modes.
+
+### Retry letter topic
+
+For many online business systems, a message is re-consumed due to exception occurs in the business logic processing. To configure the delay time for re-consuming the failed messages, you can configure the producer to send messages to both the business topic and the retry letter topic, and enable automatic retry on the consumer. When automatic retry is enabled on the consumer, a message is stored in the retry letter topic if the messages are not consumed, and therefore the consumer automa [...]
+
+By default, automatic retry is disabled. You can set `enableRetry` to `true` to enable automatic retry on the consumer.
+
+This example shows how to consume messages from a retry letter topic.
+
+```java
+Consumer<byte[]> consumer = pulsarClient.newConsumer(Schema.BYTES)
+                .topic(topic)
+                .subscriptionName("my-subscription")
+                .subscriptionType(SubscriptionType.Shared)
+                .enableRetry(true)
+                .receiverQueueSize(100)
+                .deadLetterPolicy(DeadLetterPolicy.builder()
+                        .maxRedeliverCount(maxRedeliveryCount)
+                        .retryLetterTopic("persistent://my-property/my-ns/my-subscription-custom-Retry")
+                        .build())
+                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
+                .subscribe();
+```
+
+## Topics
+
+As in other pub-sub systems, topics in Pulsar are named channels for transmitting messages from producers to consumers. Topic names are URLs that have a well-defined structure:
+
+```http
+{persistent|non-persistent}://tenant/namespace/topic
+```
+
+Topic name component | Description
+:--------------------|:-----------
+`persistent` / `non-persistent` | This identifies the type of topic. Pulsar supports two kind of topics: [persistent](concepts-architecture-overview.md#persistent-storage) and [non-persistent](#non-persistent-topics). The default is persistent, so if you do not specify a type, the topic is persistent. With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topi [...]
+`tenant`             | The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters.
+`namespace`          | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespaces) level. Each tenant has one or multiple namespaces.
+`topic`              | The final part of the name. Topic names have no special meaning in a Pulsar instance.
+
+> **No need to explicitly create new topics**
+> You do not need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar creates that topic under the namespace provided in the [topic name](#topics) automatically.
+> If no tenant or namespace is specified when a client creates a topic, the topic is created in the default tenant and namespace. You can also create a topic in a specified tenant and namespace, such as `persistent://my-tenant/my-namespace/my-topic`. `persistent://my-tenant/my-namespace/my-topic` means the `my-topic` topic is created in the `my-namespace` namespace of the `my-tenant` tenant.
+
+## Namespaces
+
+A namespace is a logical nomenclature within a tenant. A tenant creates multiple namespaces via the [admin API](admin-api-namespaces.md#create). For instance, a tenant with different applications can create a separate namespace for each application. A namespace allows the application to create and manage a hierarchy of topics. The topic `my-tenant/app1` is a namespace for the application `app1` for `my-tenant`. You can create any number of [topics](#topics) under the namespace.
+
+## Subscriptions
+
+A subscription is a named configuration rule that determines how messages are delivered to consumers. Four subscription modes are available in Pulsar: [exclusive](#exclusive), [shared](#shared), [failover](#failover), and [key_shared](#key_shared). These modes are illustrated in the figure below.
+
+![Subscription modes](/assets/pulsar-subscription-modes.png)
+
+> **Pub-Sub or Queuing**
+> In Pulsar, you can use different subscriptions flexibly.
+> * If you want to achieve traditional "fan-out pub-sub messaging" among consumers, specify a unique subscription name for each consumer. It is exclusive subscription mode.
+> * If you want to achieve "message queuing" among consumers, share the same subscription name among multiple consumers(shared, failover, key_shared).
+> * If you want to achieve both effects simultaneously, combine exclusive subscription mode with other subscription modes for consumers.
+
+### Consumerless Subscriptions and Their Corresponding Modes
+When a subscription has no consumers, its subscription mode is undefined. A subscription's mode is defined when a consumer connects to the subscription, and the mode can be changed by restarting all consumers with a different configuration.
+
+### Exclusive
+
+In *exclusive* mode, only a single consumer is allowed to attach to the subscription. If multiple consumers subscribe to a topic using the same subscription, an error occurs.
+
+In the diagram below, only **Consumer A-0** is allowed to consume messages.
+
+> Exclusive mode is the default subscription mode.
+
+![Exclusive subscriptions](/assets/pulsar-exclusive-subscriptions.png)
+
+### Failover
+
+In *failover* mode, multiple consumers can attach to the same subscription. A master consumer is picked for a non-partitioned topic, or for each partition of a partitioned topic, and receives messages. When the master consumer disconnects, all (non-acknowledged and subsequent) messages are delivered to the next consumer in line.
+
+For partitioned topics, the broker sorts consumers by priority level and lexicographical order of consumer name. The broker then tries to evenly assign partitions to the consumers with the highest priority level.
+
+For a non-partitioned topic, the broker picks consumers in the order in which they subscribe to the topic.
+
+In the diagram below, **Consumer-B-0** is the master consumer while **Consumer-B-1** would be the next consumer in line to receive messages if **Consumer-B-0** is disconnected.
+
+![Failover subscriptions](/assets/pulsar-failover-subscriptions.png)
+
+### Shared
+
+In *shared* or *round robin* mode, multiple consumers can attach to the same subscription. Messages are delivered in a round robin distribution across consumers, and any given message is delivered to only one consumer. When a consumer disconnects, all the messages that were sent to it and not acknowledged will be rescheduled for sending to the remaining consumers.
+
+In the diagram below, **Consumer-C-1** and **Consumer-C-2** are able to subscribe to the topic, but **Consumer-C-3** and others could as well.
+
+> **Limitations of shared mode**
+> When using shared mode, be aware that:
+> * Message ordering is not guaranteed.
+> * You cannot use cumulative acknowledgment with shared mode.
+
+![Shared subscriptions](/assets/pulsar-shared-subscriptions.png)
+
+### Key_Shared
+
+In *Key_Shared* mode, multiple consumers can attach to the same subscription. Messages are distributed across consumers, and messages with the same key or same ordering key are delivered to only one consumer. No matter how many times a message is re-delivered, it is delivered to the same consumer. When a consumer connects or disconnects, the consumer serving a particular key of messages may change.
+
+> **Limitations of Key_Shared mode**
+> When you use Key_Shared mode, be aware that:
+> * You need to specify a key or orderingKey for messages.
+> * You cannot use cumulative acknowledgment with Key_Shared mode.
+> * Your producers should disable batching or use a key-based batch builder.
+
+![Key_Shared subscriptions](/assets/pulsar-key-shared-subscriptions.png)
+
+**You can disable Key_Shared subscription in the `broker.conf` file.**
+
+## Multi-topic subscriptions
+
+When a consumer subscribes to a Pulsar topic, by default it subscribes to one specific topic, such as `persistent://public/default/my-topic`. As of Pulsar version 1.23.0-incubating, however, Pulsar consumers can simultaneously subscribe to multiple topics. You can define a list of topics in two ways:
+
+* On the basis of a [**reg**ular **ex**pression](https://en.wikipedia.org/wiki/Regular_expression) (regex), for example `persistent://public/default/finance-.*`
+* By explicitly defining a list of topics
+
+> When subscribing to multiple topics by regex, all topics must be in the same [namespace](#namespaces).
+
+When subscribing to multiple topics, the Pulsar client automatically makes a call to the Pulsar API to discover the topics that match the regex pattern/list, and then subscribe to all of them. If any of the topics do not exist, the consumer auto-subscribes to them once the topics are created.
+
+> **No ordering guarantees across multiple topics**
+> When a producer sends messages to a single topic, all messages are guaranteed to be read from that topic in the same order. However, these guarantees do not hold across multiple topics. So when a producer sends message to multiple topics, the order in which messages are read from those topics is not guaranteed to be the same.
+
+The following are multi-topic subscription examples for Java.
+
+```java
+import java.util.regex.Pattern;
+
+import org.apache.pulsar.client.api.Consumer;
+import org.apache.pulsar.client.api.PulsarClient;
+
+PulsarClient pulsarClient = // Instantiate Pulsar client object
+
+// Subscribe to all topics in a namespace
+Pattern allTopicsInNamespace = Pattern.compile("persistent://public/default/.*");
+Consumer<byte[]> allTopicsConsumer = pulsarClient.newConsumer()
+                .topicsPattern(allTopicsInNamespace)
+                .subscriptionName("subscription-1")
+                .subscribe();
+
+// Subscribe to a subsets of topics in a namespace, based on regex
+Pattern someTopicsInNamespace = Pattern.compile("persistent://public/default/foo.*");
+Consumer<byte[]> someTopicsConsumer = pulsarClient.newConsumer()
+                .topicsPattern(someTopicsInNamespace)
+                .subscriptionName("subscription-1")
+                .subscribe();
+```
+
+For code examples, see [Java](client-libraries-java.md#multi-topic-subscriptions).
+
+## Partitioned topics
+
+Normal topics are served only by a single broker, which limits the maximum throughput of the topic. *Partitioned topics* are a special type of topic that are handled by multiple brokers, thus allowing for higher throughput.
+
+A partitioned topic is actually implemented as N internal topics, where N is the number of partitions. When publishing messages to a partitioned topic, each message is routed to one of several brokers. The distribution of partitions across brokers is handled automatically by Pulsar.
+
+The diagram below illustrates this:
+
+![](/assets/partitioning.png)
+
+The **Topic1** topic has five partitions (**P0** through **P4**) split across three brokers. Because there are more partitions than brokers, two brokers handle two partitions a piece, while the third handles only one (again, Pulsar handles this distribution of partitions automatically).
+
+Messages for this topic are broadcast to two consumers. The [routing mode](#routing-modes) determines which partition each message is published to, while the [subscription mode](#subscription-modes) determines which messages go to which consumers.
+
+Decisions about routing and subscription modes can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
+
+There is no difference between partitioned topics and normal topics in terms of how subscription modes work, as partitioning only determines what happens between when a message is published by a producer and processed and acknowledged by a consumer.
+
+Partitioned topics need to be explicitly created via the [admin API](admin-api-overview.md). The number of partitions can be specified when creating the topic.
+
+### Routing modes
+
+When publishing to partitioned topics, you must specify a *routing mode*. The routing mode determines which partition---that is, which internal topic---each message should be published to.
+
+There are three {@inject: javadoc:MessageRoutingMode:/client/org/apache/pulsar/client/api/MessageRoutingMode} available:
+
+Mode     | Description 
+:--------|:------------
+`RoundRobinPartition` | If no key is provided, the producer will publish messages across all partitions in round-robin fashion to achieve maximum throughput. Please note that round-robin is not done per individual message but rather it's set to the same boundary of batching delay, to ensure batching is effective. While if a key is specified on the message, the partitioned producer will hash the key and assign message to a particular partition. This is the default mode. 
+`SinglePartition`     | If no key is provided, the producer will randomly pick one single partition and publish all the messages into that partition. While if a key is specified on the message, the partitioned producer will hash the key and assign message to a particular partition.
+`CustomPartition`     | Use custom message router implementation that will be called to determine the partition for a particular message. User can create a custom routing mode by using the [Java client](client-libraries-java.md) and implementing the {@inject: javadoc:MessageRouter:/client/org/apache/pulsar/client/api/MessageRouter} interface.
+
+### Ordering guarantee
+
+The ordering of messages is related to MessageRoutingMode and Message Key. Usually, users want a per-key-partition ordering guarantee.
+
+If there is a key attached to message, the messages will be routed to corresponding partitions based on the hashing scheme specified by {@inject: javadoc:HashingScheme:/client/org/apache/pulsar/client/api/HashingScheme} in {@inject: javadoc:ProducerBuilder:/client/org/apache/pulsar/client/api/ProducerBuilder}, when using either `SinglePartition` or `RoundRobinPartition` mode.
+
+Ordering guarantee | Description | Routing Mode and Key
+:------------------|:------------|:------------
+Per-key-partition  | All the messages with the same key will be in order and be placed in same partition. | Use either `SinglePartition` or `RoundRobinPartition` mode, and Key is provided by each message.
+Per-producer       | All the messages from the same producer will be in order. | Use `SinglePartition` mode, and no Key is provided for each message.
+
+### Hashing scheme
+
+{@inject: javadoc:HashingScheme:/client/org/apache/pulsar/client/api/HashingScheme} is an enum that represents the set of standard hashing functions available when choosing the partition to use for a particular message.
+
+There are 2 types of standard hashing functions available: `JavaStringHash` and `Murmur3_32Hash`. 
+The default hashing function for producers is `JavaStringHash`.
+Note that `JavaStringHash` is not useful when producers can come from multiple language clients; in this case, it is recommended to use `Murmur3_32Hash`.
+
+
+
+## Non-persistent topics
+
+
+By default, Pulsar persistently stores *all* unacknowledged messages on multiple [BookKeeper](concepts-architecture-overview.md#persistent-storage) bookies (storage nodes). Data for messages on persistent topics can thus survive broker restarts and subscriber failover.
+
+Pulsar also, however, supports **non-persistent topics**, which are topics on which messages are *never* persisted to disk and live only in memory. When using non-persistent delivery, killing a Pulsar broker or disconnecting a subscriber to a topic means that all in-transit messages are lost on that (non-persistent) topic, meaning that clients may see message loss.
+
+Non-persistent topics have names of this form (note the `non-persistent` in the name):
+
+```http
+non-persistent://tenant/namespace/topic
+```
+
+> For more info on using non-persistent topics, see the [Non-persistent messaging cookbook](cookbooks-non-persistent.md).
+
+In non-persistent topics, brokers immediately deliver messages to all connected subscribers *without persisting them* in [BookKeeper](concepts-architecture-overview.md#persistent-storage). If a subscriber is disconnected, the broker will not be able to deliver those in-transit messages, and subscribers will never be able to receive those messages again. Eliminating the persistent storage step makes messaging on non-persistent topics slightly faster than on persistent topics in some cases [...]
+
+> With non-persistent topics, message data lives only in memory. If a message broker fails or message data can otherwise not be retrieved from memory, your message data may be lost. Use non-persistent topics only if you're *certain* that your use case requires it and can sustain it.
+
+By default, non-persistent topics are enabled on Pulsar brokers. You can disable them in the broker's [configuration](reference-configuration.md#broker-enableNonPersistentTopics). You can manage non-persistent topics using the `pulsar-admin topics` command. For more information, see [`pulsar-admin`](https://pulsar.apache.org/tools/pulsar-admin/).
+
+### Performance
+
+Non-persistent messaging is usually faster than persistent messaging because brokers don't persist messages and immediately send acks back to the producer as soon as the message is delivered to connected brokers. Producers thus see comparatively low publish latency with non-persistent topics.
+
+### Client API
+
+Producers and consumers can connect to non-persistent topics in the same way as persistent topics, with the crucial difference that the topic name must start with `non-persistent`. All three subscription modes---[exclusive](#exclusive), [shared](#shared), and [failover](#failover)---are supported for non-persistent topics.
+
+Here's an example [Java consumer](client-libraries-java.md#consumers) for a non-persistent topic:
+
+```java
+PulsarClient client = PulsarClient.builder()
+        .serviceUrl("pulsar://localhost:6650")
+        .build();
+String npTopic = "non-persistent://public/default/my-topic";
+String subscriptionName = "my-subscription-name";
+
+Consumer<byte[]> consumer = client.newConsumer()
+        .topic(npTopic)
+        .subscriptionName(subscriptionName)
+        .subscribe();
+```
+
+Here's an example [Java producer](client-libraries-java.md#producer) for the same non-persistent topic:
+
+```java
+Producer<byte[]> producer = client.newProducer()
+                .topic(npTopic)
+                .create();
+```
+
+## Message retention and expiry
+
+By default, Pulsar message brokers:
+
+* immediately delete *all* messages that have been acknowledged by a consumer, and
+* [persistently store](concepts-architecture-overview.md#persistent-storage) all unacknowledged messages in a message backlog.
+
+Pulsar has two features, however, that enable you to override this default behavior:
+
+* Message **retention** enables you to store messages that have been acknowledged by a consumer
+* Message **expiry** enables you to set a time to live (TTL) for messages that have not yet been acknowledged
+
+> All message retention and expiry is managed at the [namespace](#namespaces) level. For a how-to, see the [Message retention and expiry](cookbooks-retention-expiry.md) cookbook.
+
+The diagram below illustrates both concepts:
+
+![Message retention and expiry](/assets/retention-expiry.png)
+
+With message retention, shown at the top, a <span style={{color: " #89b557"}}>retention policy</span> applied to all topics in a namespace dictates that some messages are durably stored in Pulsar even though they've already been acknowledged. Acknowledged messages that are not covered by the retention policy are <span style={{color: " #bb3b3e"}}>deleted</span>. Without a retention policy, *all* of the <span style={{color: " #19967d"}}>acknowledged messages</span> would be deleted.
+
+With message expiry, shown at the bottom, some messages are <span style={{color: " #bb3b3e"}}>deleted</span>, even though they <span style={{color: " #337db6"}}>haven't been acknowledged</span>, because they've expired according to the <span style={{color: " #e39441"}}>TTL applied to the namespace</span> (for example because a TTL of 5 minutes has been applied and the messages haven't been acknowledged but are 10 minutes old).
+
+## Message deduplication
+
+Message duplication occurs when a message is [persisted](concepts-architecture-overview.md#persistent-storage) by Pulsar more than once. Message deduplication is an optional Pulsar feature that prevents unnecessary message duplication by processing each message only once, even if the message is received more than once.
+
+The following diagram illustrates what happens when message deduplication is disabled vs. enabled:
+
+![Pulsar message deduplication](/assets/message-deduplication.png)
+
+
+Message deduplication is disabled in the scenario shown at the top. Here, a producer publishes message 1 on a topic; the message reaches a Pulsar broker and is [persisted](concepts-architecture-overview.md#persistent-storage) to BookKeeper. The producer then sends message 1 again (in this case due to some retry logic), and the message is received by the broker and stored in BookKeeper again, which means that duplication has occurred.
+
+In the second scenario at the bottom, the producer publishes message 1, which is received by the broker and persisted, as in the first scenario. When the producer attempts to publish the message again, however, the broker knows that it has already seen message 1 and thus does not persist the message.
+
+> Message deduplication is handled at the namespace level or the topic level. For more instructions, see the [message deduplication cookbook](cookbooks-deduplication.md).
+
+
+### Producer idempotency
+
+The other available approach to message deduplication is to ensure that each message is *only produced once*. This approach is typically called **producer idempotency**. The drawback of this approach is that it defers the work of message deduplication to the application. In Pulsar, this is handled at the [broker](reference-terminology.md#broker) level, so you do not need to modify your Pulsar client code. Instead, you only need to make administrative changes. For details, see [Managing m [...]
+
+### Deduplication and effectively-once semantics
+
+Message deduplication makes Pulsar an ideal messaging system to be used in conjunction with stream processing engines (SPEs) and other systems seeking to provide effectively-once processing semantics. Messaging systems that do not offer automatic message deduplication require the SPE or other system to guarantee deduplication, which means that strict message ordering comes at the cost of burdening the application with the responsibility of deduplication. With Pulsar, strict ordering guar [...]
+
+> You can find more in-depth information in [this post](https://www.splunk.com/en_us/blog/it/exactly-once-is-not-exactly-the-same.html).
+
+## Delayed message delivery
+Delayed message delivery enables you to consume a message later rather than immediately. In this mechanism, a message is stored in BookKeeper. The `DelayedDeliveryTracker` maintains the time index (time -> messageId) in memory after the message is published to a broker, and the message is delivered to a consumer once the specified delay has passed.  
+
+Delayed message delivery only works in Shared subscription mode. In Exclusive and Failover subscription modes, the delayed message is dispatched immediately.
+
+The diagram below illustrates the concept of delayed message delivery:
+
+![Delayed Message Delivery](/assets/message_delay.png)
+
+A broker saves a message without any check. When a consumer consumes a message, if the message is set to delay, then the message is added to `DelayedDeliveryTracker`. A subscription checks and gets timeout messages from `DelayedDeliveryTracker`.
+
+### Broker 
+Delayed message delivery is enabled by default. You can change it in the broker configuration file as below:
+
+```
+# Whether to enable the delayed delivery for messages.
+# If disabled, messages are immediately delivered and there is no tracking overhead.
+delayedDeliveryEnabled=true
+
+# Control the ticking time for the retry of delayed message delivery,
+# affecting the accuracy of the delivery time compared to the scheduled time.
+# Default is 1 second.
+delayedDeliveryTickTimeMillis=1000
+```
+
+### Producer 
+The following is an example of delayed message delivery for a producer in Java:
+```java
+// message to be delivered at the configured delay interval
+producer.newMessage().deliverAfter(3L, TimeUnit.MINUTES).value("Hello Pulsar!").send();
+```
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-multi-tenancy.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-multi-tenancy.md
new file mode 100644
index 0000000..15c802f
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-multi-tenancy.md
@@ -0,0 +1,55 @@
+---
+id: concepts-multi-tenancy
+title: Multi Tenancy
+sidebar_label: Multi Tenancy
+original_id: concepts-multi-tenancy
+---
+
+Pulsar was created from the ground up as a multi-tenant system. To support multi-tenancy, Pulsar has a concept of tenants. Tenants can be spread across clusters and can each have their own [authentication and authorization](security-overview.md) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](cookbooks-retention-expiry.md#time-to-live-ttl), and isolation policies can be managed.
+
+The multi-tenant nature of Pulsar is reflected mostly visibly in topic URLs, which have this structure:
+
+```http
+persistent://tenant/namespace/topic
+```
+
+As you can see, the tenant is the most basic unit of categorization for topics (more fundamental than the namespace and topic name).
+
+## Tenants
+
+To each tenant in a Pulsar instance you can assign:
+
+* An [authorization](security-authorization.md) scheme
+* The set of [clusters](reference-terminology.md#cluster) to which the tenant's configuration applies
+
+## Namespaces
+
+Tenants and namespaces are two key concepts of Pulsar to support multi-tenancy.
+
+* Pulsar is provisioned for specified tenants with appropriate capacity allocated to the tenant.
+* A namespace is the administrative unit nomenclature within a tenant. The configuration policies set on a namespace apply to all the topics created in that namespace. A tenant may create multiple namespaces via self-administration using the REST API and the [`pulsar-admin`](reference-pulsar-admin.md) CLI tool. For instance, a tenant with different applications can create a separate namespace for each application.
+
+Names for topics in the same namespace will look like this:
+
+```http
+persistent://tenant/app1/topic-1
+
+persistent://tenant/app1/topic-2
+
+persistent://tenant/app1/topic-3
+```
+
+### Namespace change events and topic-level policies
+
+Pulsar is a multi-tenant event streaming system. Administrators can manage the tenants and namespaces by setting policies at different levels. However, the policies, such as retention policy and storage quota policy, are only available at a namespace level. In many use cases, users need to set a policy at the topic level. The namespace change events approach is proposed for supporting topic-level policies in an efficient way. In this approach, Pulsar is used as an event log to store name [...]
+
+- Avoid introducing additional load to ZooKeeper.
+- Use Pulsar as an event log for propagating the policy cache. It can scale efficiently.
+- Use Pulsar SQL to query the namespace changes and audit the system.
+
+Each namespace has a system topic `__change_events`. This system topic is used for storing change events for a given namespace. The following figure illustrates how to use namespace change events to implement a topic-level policy.
+
+1. Pulsar Admin clients communicate with the Admin Restful API to update topic level policies.
+2. Any broker that receives the Admin HTTP request publishes a topic policy change event to the corresponding `__change_events` topic of the namespace.
+3. Each broker that owns a namespace bundle(s) subscribes to the `__change_events` topic to receive change events of the namespace. It then applies the change events to the policy cache.
+4. Once the policy cache is updated, the broker sends the response back to the Pulsar Admin clients.
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-multiple-advertised-listeners.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-multiple-advertised-listeners.md
new file mode 100644
index 0000000..8d49182
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-multiple-advertised-listeners.md
@@ -0,0 +1,39 @@
+---
+id: concepts-multiple-advertised-listeners
+title: Multiple advertised listeners
+sidebar_label: Multiple advertised listeners
+original_id: concepts-multiple-advertised-listeners
+---
+
+When a Pulsar cluster is deployed in the production environment, it may require to expose multiple advertised addresses for the broker. For example, when you deploy a Pulsar cluster in Kubernetes and want other clients, which are not in the same Kubernetes cluster, to connect to the Pulsar cluster, you need to assign a broker URL to external clients. But clients in the same Kubernetes cluster can still connect to the Pulsar cluster through the internal network of Kubernetes.
+
+## Advertised listeners
+
+To ensure clients in both internal and external networks can connect to a Pulsar cluster, Pulsar introduces `advertisedListeners` and `internalListenerName` configuration options into the [broker configuration file](reference-configuration.md#broker) to ensure that the broker supports exposing multiple advertised listeners and support the separation of internal and external network traffic.
+
+- The `advertisedListeners` is used to specify multiple advertised listeners. The broker uses the listener as the broker identifier in the load manager and the bundle owner data. The `advertisedListeners` is formatted as `<listener_name>:pulsar://<host>:<port>, <listener_name>:pulsar+ssl://<host>:<port>`. You can set up the `advertisedListeners` like
+`advertisedListeners=internal:pulsar://192.168.1.11:6660,internal:pulsar+ssl://192.168.1.11:6651`.
+
+- The `internalListenerName` is used to specify the internal service URL that the broker uses. You can specify the `internalListenerName` by choosing one of the `advertisedListeners`. The broker uses the listener name of the first advertised listener as the `internalListenerName` if the `internalListenerName` is absent.
+
+After setting up the `advertisedListeners`, clients can choose one of the listeners as the service URL to create a connection to the broker as long as the network is accessible. However, if the client creates producers or consumer on a topic, the client must send a lookup requests to the broker for getting the owner broker, then connect to the owner broker to publish messages or consume messages. Therefore, You must allow the client to get the corresponding service URL with the same adve [...]
+
+## Use multiple advertised listeners
+
+This example shows how a Pulsar client uses multiple advertised listeners.
+
+1. Configure multiple advertised listeners in the broker configuration file.
+
+```shell
+advertisedListeners={listenerName}:pulsar://xxxx:6650,
+{listenerName}:pulsar+ssl://xxxx:6651
+```
+
+2. Specify the listener name for the client.
+
+```java
+PulsarClient client = PulsarClient.builder()
+    .serviceUrl("pulsar://xxxx:6650")
+    .listenerName("external")
+    .build();
+```
\ No newline at end of file
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-overview.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-overview.md
new file mode 100644
index 0000000..a37b11f
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-overview.md
@@ -0,0 +1,31 @@
+---
+id: concepts-overview
+title: Pulsar Overview
+sidebar_label: Overview
+original_id: concepts-overview
+---
+
+Pulsar is a multi-tenant, high-performance solution for server-to-server messaging. Originally developed by Yahoo, Pulsar is under the stewardship of the [Apache Software Foundation](https://www.apache.org/).
+
+Key features of Pulsar are listed below:
+
+* Native support for multiple clusters in a Pulsar instance, with seamless [geo-replication](administration-geo.md) of messages across clusters.
+* Very low publish and end-to-end latency.
+* Seamless scalability to over a million topics.
+* A simple [client API](concepts-clients.md) with bindings for [Java](client-libraries-java.md), [Go](client-libraries-go.md), [Python](client-libraries-python.md) and [C++](client-libraries-cpp.md).
+* Multiple [subscription modes](concepts-messaging.md#subscription-modes) ([exclusive](concepts-messaging.md#exclusive), [shared](concepts-messaging.md#shared), and [failover](concepts-messaging.md#failover)) for topics.
+* Guaranteed message delivery with [persistent message storage](concepts-architecture-overview.md#persistent-storage) provided by [Apache BookKeeper](http://bookkeeper.apache.org/).
+* A serverless light-weight computing framework [Pulsar Functions](functions-overview.md) offers the capability for stream-native data processing.
+* A serverless connector framework [Pulsar IO](io-overview.md), which is built on Pulsar Functions, makes it easier to move data in and out of Apache Pulsar.
+* [Tiered Storage](concepts-tiered-storage.md) offloads data from hot/warm storage to cold/longterm storage (such as S3 and GCS) when the data is aging out.
+
+## Contents
+
+- [Messaging Concepts](concepts-messaging.md)
+- [Architecture Overview](concepts-architecture-overview.md)
+- [Pulsar Clients](concepts-clients.md)
+- [Geo Replication](concepts-replication.md)
+- [Multi Tenancy](concepts-multi-tenancy.md)
+- [Authentication and Authorization](concepts-authentication.md)
+- [Topic Compaction](concepts-topic-compaction.md)
+- [Tiered Storage](concepts-tiered-storage.md)
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-proxy-sni-routing.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-proxy-sni-routing.md
new file mode 100644
index 0000000..419ff6d
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-proxy-sni-routing.md
@@ -0,0 +1,151 @@
+---
+id: concepts-proxy-sni-routing
+title: Proxy support with SNI routing
+sidebar_label: Proxy support with SNI routing
+original_id: concepts-proxy-sni-routing
+---
+
+A proxy server is an intermediary server that forwards requests from multiple clients to different servers across the Internet. The proxy server acts as a "traffic cop" in both forward and reverse proxy scenarios, and provides benefits to your system such as load balancing, improved performance, security, auto-scaling, and so on.
+
+The proxy in Pulsar acts as a reverse proxy, and creates a gateway in front of brokers. Proxies such as Apache Traffic Server (ATS), HAProxy, Nginx, and Envoy are not natively supported in Pulsar; however, these proxy servers support **SNI routing**. SNI routing is used to route traffic to a destination without terminating the SSL connection. Layer 4 routing provides greater transparency because the outbound connection is determined by examining the destination address in the client TCP packets.
+
+Pulsar clients (Java, C++, Python) support [SNI routing protocol](https://github.com/apache/pulsar/wiki/PIP-60:-Support-Proxy-server-with-SNI-routing), so you can connect to brokers through the proxy. This document walks you through how to set up the ATS proxy, enable SNI routing, and connect Pulsar client to the broker through the ATS proxy.
+
+## ATS-SNI Routing in Pulsar
+To support [layer-4 SNI routing](https://docs.trafficserver.apache.org/en/latest/admin-guide/layer-4-routing.en.html) with ATS, the inbound connection must be a TLS connection. Pulsar client supports SNI routing protocol on TLS connection, so when Pulsar clients connect to broker through ATS proxy, Pulsar uses ATS as a reverse proxy.
+
+Pulsar supports SNI routing for geo-replication, so brokers can connect to brokers in other clusters through the ATS proxy.
+
+This section explains how to set up and use ATS as a reverse proxy, so Pulsar clients can connect to brokers through the ATS proxy using the SNI routing protocol on TLS connection. 
+
+### Set up ATS Proxy for layer-4 SNI routing
+To support layer 4 SNI routing, you need to configure the `records.config` and `ssl_server_name.config` files.
+
+![Pulsar client SNI](/assets/pulsar-sni-client.png)
+
+The [records.config](https://docs.trafficserver.apache.org/en/latest/admin-guide/files/records.config.en.html) file is located in the `/usr/local/etc/trafficserver/` directory by default. The file lists configurable variables used by the ATS.
+
+To configure the `records.config` files, complete the following steps.
+1. Update TLS port (`http.server_ports`) on which proxy listens, and update proxy certs (`ssl.client.cert.path` and `ssl.client.cert.filename`) to secure TLS tunneling. 
+2. Configure server ports (`http.connect_ports`) used for tunneling to the broker. If Pulsar brokers are listening on `4443` and `6651` ports, add the brokers service port in the `http.connect_ports` configuration.
+
+The following is an example.
+
+```
+# PROXY TLS PORT
+CONFIG proxy.config.http.server_ports STRING 4443:ssl 4080
+# PROXY CERTS FILE PATH
+CONFIG proxy.config.ssl.client.cert.path STRING /proxy-cert.pem
+# PROXY KEY FILE PATH
+CONFIG proxy.config.ssl.client.cert.filename STRING /proxy-key.pem
+
+
+# The range of origin server ports that can be used for tunneling via CONNECT.
+# Traffic Server allows tunnels only to the specified ports. Supports both wildcards (*) and ranges (e.g. 0-1023).
+CONFIG proxy.config.http.connect_ports STRING 4443 6651
+```
+
+The [ssl_server_name](https://docs.trafficserver.apache.org/en/8.0.x/admin-guide/files/ssl_server_name.yaml.en.html) file is used to configure TLS connection handling for inbound and outbound connections. The configuration is determined by the SNI values provided by the inbound connection. The file consists of a set of configuration items, and each is identified by an SNI value (`fqdn`). When an inbound TLS connection is made, the SNI value from the TLS negotiation is matched with the it [...]
+
+The following example shows mapping of the inbound SNI hostname coming from the client, and the actual broker service URL where request should be redirected. For example, if the client sends the SNI header `pulsar-broker1`, the proxy creates a TLS tunnel by redirecting request to the `pulsar-broker1:6651` service URL.
+
+```
+server_config = {
+  {
+     fqdn = 'pulsar-broker-vip',
+     # Forward to Pulsar broker which is listening on 6651
+     tunnel_route = 'pulsar-broker-vip:6651'
+  },
+  {
+     fqdn = 'pulsar-broker1',
+     # Forward to Pulsar broker-1 which is listening on 6651
+     tunnel_route = 'pulsar-broker1:6651'
+  },
+  {
+     fqdn = 'pulsar-broker2',
+     # Forward to Pulsar broker-2 which is listening on 6651
+     tunnel_route = 'pulsar-broker2:6651'
+  },
+}
+```
+
+After you configure the `ssl_server_name.config` and `records.config` files, the ATS-proxy server handles SNI routing and creates TCP tunnel between the client and the broker.
+
+### Configure Pulsar-client with SNI routing
+ATS SNI-routing works only with TLS. You need to enable TLS for the ATS proxy and brokers first, configure the SNI routing protocol, and then connect Pulsar clients to brokers through ATS proxy. Pulsar clients support SNI routing by connecting to the proxy, and sending the target broker URL to the SNI header. This process is processed internally. You only need to configure the following proxy configuration initially when you create a Pulsar client to use the SNI routing protocol.
+
+<!--DOCUSAURUS_CODE_TABS-->
+
+<!--Java-->
+
+```java
+String brokerServiceUrl = "pulsar+ssl://pulsar-broker-vip:6651/";
+String proxyUrl = "pulsar+ssl://ats-proxy:443";
+ClientBuilder clientBuilder = PulsarClient.builder()
+        .serviceUrl(brokerServiceUrl)
+        .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH)
+        .enableTls(true)
+        .allowTlsInsecureConnection(false)
+        .proxyServiceUrl(proxyUrl, ProxyProtocol.SNI)
+        .operationTimeout(1000, TimeUnit.MILLISECONDS);
+
+Map<String, String> authParams = new HashMap<>();
+authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
+authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
+clientBuilder.authentication(AuthenticationTls.class.getName(), authParams);
+
+PulsarClient pulsarClient = clientBuilder.build();
+```
+
+<!--C++-->
+
+```c++
+ClientConfiguration config = ClientConfiguration();
+config.setUseTls(true);
+config.setTlsTrustCertsFilePath("/path/to/cacert.pem");
+config.setTlsAllowInsecureConnection(false);
+config.setAuth(pulsar::AuthTls::create(
+            "/path/to/client-cert.pem", "/path/to/client-key.pem"));
+
+Client client("pulsar+ssl://ats-proxy:443", config);
+```
+
+<!--Python-->
+
+```python
+from pulsar import Client, AuthenticationTLS
+
+auth = AuthenticationTLS("/path/to/my-role.cert.pem", "/path/to/my-role.key-pk8.pem")
+client = Client("pulsar+ssl://ats-proxy:443",
+                tls_trust_certs_file_path="/path/to/ca.cert.pem",
+                tls_allow_insecure_connection=False,
+                authentication=auth)
+```
+
+<!--END_DOCUSAURUS_CODE_TABS-->
+
+### Pulsar geo-replication with SNI routing
+You can use the ATS proxy for geo-replication. Pulsar brokers can connect to brokers in geo-replication by using SNI routing. To enable SNI routing for broker connection cross clusters, you need to configure SNI proxy URL to the cluster metadata. If you have configured SNI proxy URL in the cluster metadata, you can connect to broker cross clusters through the proxy over SNI routing.
+
+![Pulsar client SNI](/assets/pulsar-sni-geo.png)
+
+In this example, a Pulsar cluster is deployed into two separate regions, `us-west` and `us-east`. Both regions are configured with ATS proxy, and brokers in each region run behind the ATS proxy. We configure the cluster metadata for both clusters, so brokers in one cluster can use SNI routing and connect to brokers in other clusters through the ATS proxy.
+
+(a) Configure the cluster metadata for `us-east` with `us-east` broker service URL and `us-east` ATS proxy URL with SNI proxy-protocol.
+
+```
+./pulsar-admin clusters update \
+--broker-url-secure pulsar+ssl://east-broker-vip:6651 \
+--url http://east-broker-vip:8080 \
+--proxy-protocol SNI \
+--proxy-url pulsar+ssl://east-ats-proxy:443
+```
+
+(b) Configure the cluster metadata for `us-west` with `us-west` broker service URL and `us-west` ATS proxy URL with SNI proxy-protocol.
+
+```
+./pulsar-admin clusters update \
+--broker-url-secure pulsar+ssl://west-broker-vip:6651 \
+--url http://west-broker-vip:8080 \
+--proxy-protocol SNI \
+--proxy-url pulsar+ssl://west-ats-proxy:443
+```
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-replication.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-replication.md
new file mode 100644
index 0000000..09f16e4
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-replication.md
@@ -0,0 +1,9 @@
+---
+id: concepts-replication
+title: Geo Replication
+sidebar_label: Geo Replication
+original_id: concepts-replication
+---
+
+Pulsar enables messages to be produced and consumed in different geo-locations. For instance, your application may be publishing data in one region or market and you would like to process it for consumption in other regions or markets. [Geo-replication](administration-geo.md) in Pulsar enables you to do that.
+
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-tiered-storage.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-tiered-storage.md
new file mode 100644
index 0000000..c00f109
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-tiered-storage.md
@@ -0,0 +1,18 @@
+---
+id: concepts-tiered-storage
+title: Tiered Storage
+sidebar_label: Tiered Storage
+original_id: concepts-tiered-storage
+---
+
+Pulsar's segment oriented architecture allows for topic backlogs to grow very large, effectively without limit. However, this can become expensive over time.
+
+One way to alleviate this cost is to use Tiered Storage. With tiered storage, older messages in the backlog can be moved from BookKeeper to a cheaper storage mechanism, while still allowing clients to access the backlog as if nothing had changed.
+
+![Tiered Storage](/assets/pulsar-tiered-storage.png)
+
+> Data written to BookKeeper is replicated to 3 physical machines by default. However, once a segment is sealed in BookKeeper it becomes immutable and can be copied to long term storage. Long term storage can achieve cost savings by using mechanisms such as [Reed-Solomon error correction](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction) to require fewer physical copies of data.
+
+Pulsar currently supports S3, Google Cloud Storage (GCS), and filesystem for [long term store](https://pulsar.apache.org/docs/en/cookbooks-tiered-storage/). Offloading to long-term storage is triggered via a REST API or command-line interface. The user passes in the amount of topic data they wish to retain on BookKeeper, and the broker will copy the backlog data to long term storage. The original data will then be deleted from BookKeeper after a configured delay (4 hours by default).
+
+> For a guide for setting up tiered storage, see the [Tiered storage cookbook](cookbooks-tiered-storage.md).
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-topic-compaction.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-topic-compaction.md
new file mode 100644
index 0000000..3aca60f
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-topic-compaction.md
@@ -0,0 +1,37 @@
+---
+id: concepts-topic-compaction
+title: Topic Compaction
+sidebar_label: Topic Compaction
+original_id: concepts-topic-compaction
+---
+
+Pulsar was built with highly scalable [persistent storage](concepts-architecture-overview.md#persistent-storage) of message data as a primary objective. Pulsar topics enable you to persistently store as many unacknowledged messages as you need while preserving message ordering. By default, Pulsar stores *all* unacknowledged/unprocessed messages produced on a topic. Accumulating many unacknowledged messages on a topic is necessary for many Pulsar use cases but it can also be very time int [...]
+
+> For a more practical guide to topic compaction, see the [Topic compaction cookbook](cookbooks-compaction.md).
+
+For some use cases consumers don't need a complete "image" of the topic log. They may only need a few values to construct a more "shallow" image of the log, perhaps even just the most recent value. For these kinds of use cases Pulsar offers **topic compaction**. When you run compaction on a topic, Pulsar goes through a topic's backlog and removes messages that are *obscured* by later messages, i.e. it goes through the topic on a per-key basis and leaves only the most recent message assoc [...]
+
+Pulsar's topic compaction feature:
+
+* Allows for faster "rewind" through topic logs
+* Applies only to [persistent topics](concepts-architecture-overview.md#persistent-storage)
+* Triggered automatically when the backlog reaches a certain size or can be triggered manually via the command line. See the [Topic compaction cookbook](cookbooks-compaction.md)
+* Is conceptually and operationally distinct from [retention and expiry](concepts-messaging.md#message-retention-and-expiry). Topic compaction *does*, however, respect retention. If retention has removed a message from the message backlog of a topic, the message will also not be readable from the compacted topic ledger.
+
+> #### Topic compaction example: the stock ticker
+> An example use case for a compacted Pulsar topic would be a stock ticker topic. On a stock ticker topic, each message bears a timestamped dollar value for stocks for purchase (with the message key holding the stock symbol, e.g. `AAPL` or `GOOG`). With a stock ticker you may care only about the most recent value(s) of the stock and have no interest in historical data (i.e. you don't need to construct a complete image of the topic's sequence of messages per key). Compaction would be high [...]
+
+
+## How topic compaction works
+
+When topic compaction is triggered [via the CLI](cookbooks-compaction.md), Pulsar will iterate over the entire topic from beginning to end. For each key that it encounters the compaction routine will keep a record of the latest occurrence of that key.
+
+After that, the broker will create a new [BookKeeper ledger](concepts-architecture-overview.md#ledgers) and make a second iteration through each message on the topic. For each message, if the key matches the latest occurrence of that key, then the key's data payload, message ID, and metadata will be written to the newly created ledger. If the key doesn't match the latest then the message will be skipped and left alone. If any given message has an empty payload, it will be skipped and con [...]
+
+After the initial compaction operation, the Pulsar [broker](reference-terminology.md#broker) that owns the topic is notified whenever any future changes are made to the compaction horizon and compacted backlog. When such changes occur:
+
+* Clients (consumers and readers) that have read compacted enabled will attempt to read messages from a topic and either:
+  * Read from the topic like normal (if the message ID is greater than or equal to the compaction horizon) or
+  * Read beginning at the compaction horizon (if the message ID is lower than the compaction horizon)
+
+
diff --git a/site2/website-next/versioned_docs/version-2.8.0/concepts-transactions.md b/site2/website-next/versioned_docs/version-2.8.0/concepts-transactions.md
new file mode 100644
index 0000000..12a32e8
--- /dev/null
+++ b/site2/website-next/versioned_docs/version-2.8.0/concepts-transactions.md
@@ -0,0 +1,30 @@
+---
+id: transactions
+title: Transactions
+sidebar_label: Overview
+original_id: transactions
+---
+
+Transactional semantics enable event streaming applications to consume, process, and produce messages in one atomic operation. In Pulsar, a producer or consumer can work with messages across multiple topics and partitions and ensure those messages are processed as a single unit. 
+
+The following concepts help you understand Pulsar transactions.
+
+## Transaction coordinator and transaction log
+The transaction coordinator maintains the topics and subscriptions that interact in a transaction. When a transaction is committed, the transaction coordinator interacts with the topic owner broker to complete the transaction.
+
+The transaction coordinator maintains the entire life cycle of transactions, and prevents a transaction from incorrect status.
+
+The transaction coordinator handles transaction timeout, and ensures that the transaction is aborted after a transaction timeout.
+
+All the transaction metadata is persisted in the transaction log. The transaction log is backed by a Pulsar topic. After the transaction coordinator crashes, it can restore the transaction metadata from the transaction log.
+
+## Transaction ID
+The transaction ID (TxnID) identifies a unique transaction in Pulsar. The transaction ID is 128-bit. The highest 16 bits are reserved for the ID of the transaction coordinator, and the remaining bits are used for monotonically increasing numbers in each transaction coordinator. With the TxnID, it is easy to locate where a transaction failed.
+
+## Transaction buffer
+Messages produced within a transaction are stored in the transaction buffer. The messages in transaction buffer are not materialized (visible) to consumers until the transactions are committed. The messages in the transaction buffer are discarded when the transactions are aborted. 
+
+## Pending acknowledge state
+Message acknowledges within a transaction are maintained by the pending acknowledge state before the transaction completes. If a message is in the pending acknowledge state, the message cannot be acknowledged by other transactions until the message is removed from the pending acknowledge state.
+
+The pending acknowledge state is persisted to the pending acknowledge log. The pending acknowledge log is backed by a Pulsar topic. A new broker can restore the state from the pending acknowledge log to ensure the acknowledgement is not lost.
diff --git a/site2/website-next/versioned_sidebars/version-2.7.3-sidebars.json b/site2/website-next/versioned_sidebars/version-2.7.3-sidebars.json
index 14e0f56..5aedbd4 100644
--- a/site2/website-next/versioned_sidebars/version-2.7.3-sidebars.json
+++ b/site2/website-next/versioned_sidebars/version-2.7.3-sidebars.json
@@ -19,6 +19,54 @@
       ],
       "collapsible": true,
       "collapsed": true
+    },
+    {
+      "type": "category",
+      "label": "Concepts and Architecture",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-overview"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-messaging"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-architecture-overview"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-clients"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-replication"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-multi-tenancy"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-authentication"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-topic-compaction"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-proxy-sni-routing"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.7.3/concepts-multiple-advertised-listeners"
+        }
+      ],
+      "collapsible": true,
+      "collapsed": true
     }
   ]
-}
+}
\ No newline at end of file
diff --git a/site2/website-next/versioned_sidebars/version-2.8.0-sidebars.json b/site2/website-next/versioned_sidebars/version-2.8.0-sidebars.json
index b9d58b6..1bdb421 100644
--- a/site2/website-next/versioned_sidebars/version-2.8.0-sidebars.json
+++ b/site2/website-next/versioned_sidebars/version-2.8.0-sidebars.json
@@ -19,6 +19,54 @@
       ],
       "collapsible": true,
       "collapsed": true
+    },
+    {
+      "type": "category",
+      "label": "Concepts and Architecture",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-overview"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-messaging"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-architecture-overview"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-clients"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-replication"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-multi-tenancy"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-authentication"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-topic-compaction"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-proxy-sni-routing"
+        },
+        {
+          "type": "doc",
+          "id": "version-2.8.0/concepts-multiple-advertised-listeners"
+        }
+      ],
+      "collapsible": true,
+      "collapsed": true
     }
   ]
 }