You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@pulsar.apache.org by GitBox <gi...@apache.org> on 2018/05/02 00:52:44 UTC

[GitHub] merlimat closed pull request #1553: Pulsar 2.0 docs

merlimat closed pull request #1553: Pulsar 2.0 docs
URL: https://github.com/apache/incubator-pulsar/pull/1553
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git a/site/_config.yml b/site/_config.yml
index b154071ab5..d4a060e49d 100644
--- a/site/_config.yml
+++ b/site/_config.yml
@@ -29,6 +29,7 @@ preview_version_id: 20180426.125800-32
 current_version: 1.22.0-incubating
 python_latest: "1.22.0"
 archived_releases:
+  - 1.22.0-incubating
   - 1.21.0-incubating
   - 1.20.0-incubating
   - 1.19.0-incubating
@@ -53,16 +54,10 @@ defaults:
 exclude:
 - Gemfile
 - Gemfile.lock
-- gulpfile.js
-- package.json
-- popovers.yaml
 - Makefile
 - scripts
 - vendor
-- node_modules
-- docs/example.md
 - generated
-- package-lock.json
 - Rakefile
 - VERSIONS
 - README.md
diff --git a/site/_data/cli/pulsar-admin.yaml b/site/_data/cli/pulsar-admin.yaml
index 424c2b41a2..21806199f9 100644
--- a/site/_data/cli/pulsar-admin.yaml
+++ b/site/_data/cli/pulsar-admin.yaml
@@ -18,7 +18,7 @@
 #
 
 description: |
-  The `pulsar-admin` tool enables you to manage Pulsar installations, including clusters, brokers, namespaces, properties, and more.
+  The `pulsar-admin` tool enables you to manage Pulsar installations, including clusters, brokers, namespaces, tenants, and more.
 commands:
 - name: broker-stats
   description: Operations to collect broker statistics
@@ -302,33 +302,33 @@ commands:
     - flags: --triggerValue
       description: The value with which the Pulsar Function is to be triggered
 - name: namespaces
-  description: Operations about namespaces
+  description: Operations for managing namespaces
   subcommands:
   - name: list
-    description: Get the namespaces for a property
-    argument: property-name
+    description: Get the namespaces for a tenant
+    argument: tenant-name
   - name: list-cluster
-    description: Get the namespaces for a property in the cluster
-    argument: property/cluster
+    description: Get the namespaces for a tenant in the cluster
+    argument: tenant/cluster
   - name: destinations
     description: Get the destinations for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: policies
     description: Get the policies of a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: create
     description: Create a new namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: -b, --bundles
       description: The number of bundles to activate
       default: 0
   - name: delete
     description: Deletes a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: set-deduplication
     description: Enable or disable message deduplication on a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: --enable, -e
       description: Enable message deduplication on the specified namespace
@@ -338,10 +338,10 @@ commands:
       default: 'false'
   - name: permissions
     description: Get the permissions on a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: grant-permission
     description: Grant permissions on a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: --actions
       description: Actions to be granted (`produce` or `consume`)
@@ -349,25 +349,25 @@ commands:
       description: The client role to which to grant the permissions
   - name: revoke-permission
     description: Revoke permissions on a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: --role
       description: The client role from which to revoke the permissions
   - name: set-clusters
     description: Set replication clusters for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: -c, --clusters
       description: Replication clusters ID list (comma-separated values)
   - name: get-clusters
     description: Get replication clusters for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: get-backlog-quotas
     description: Get the backlog quota policies for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: set-backlog-quota
     description: Set a backlog quota for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     example: |
       pulsar-admin namespaces set-backlog-quota my-prop/my-cluster/my-ns \
       --limit 2G \
@@ -383,13 +383,13 @@ commands:
         * `consumer_backlog_eviction`
   - name: remove-backlog-quota
     description: Remove a backlog quota policy from a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: get-persistence
     description: Get the persistence policies for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: set-persistence
     description: Set the persistence policies for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: -a, --bookkeeper-ack-quorom
       description: The number of acks (guaranteed copies) to wait for each entry
@@ -404,7 +404,7 @@ commands:
       description: Throttling rate of mark-delete operation (0 means no throttle)
   - name: get-message-ttl
     description: Get the message TTL for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: set-message-ttl
     description: Set the message TTL for a namespace
     options:
@@ -413,10 +413,10 @@ commands:
       default: 0
   - name: get-retention
     description: Get the retention policy for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
   - name: set-retention
     description: Set the retention policy for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: -s, --size
       description: The retention size limits (for example `10M`, `16G` or `3T`). 0 means no retention and -1 means infinite size retention
@@ -424,13 +424,13 @@ commands:
       description: "The retention time in minutes, hours, days, or weeks. Examples: `100m`, `13h`, `2d`, `5w`. 0 means no retention and -1 means infinite time retention"
   - name: unload
     description: Unload a namespace or namespace bundle from the current serving broker.
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: -b, --bundle
       description: # `{start_boundary}_{end_boundary}`
   - name: clear-backlog
     description: Clear the backlog for a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: -b, --bundle
       description: # `{start_boundary}_{end_boundary}`
@@ -441,7 +441,7 @@ commands:
       description: The subscription name
   - name: unsubscribe
     description: Unsubscribe the given subscription on all destinations on a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
     options:
     - flags: -b, --bundle
       description: # `{start_boundary}_{end_boundary}`
@@ -477,18 +477,50 @@ commands:
     argument: cluster-name
   - name: delete
     description: Delete namespace isolation policy of a cluster. This operation requires superuser privileges.
-- name: persistent
-  description: Operations on persistent topics
+- name: topics
+  description: Operations for managing Pulsar topics (both persistent and non persistent)
   subcommands:
+  - name: compact
+    description: Run compaction on the specified topic (persistent topics only)
+    argument: persistent://tenant/namespace/topic
+  - name: compaction-status
+    description: Check the status of a topic compaction (persistent topics only)
+    argument: persistent://tenant/namespace/topic
+    options:
+    - flags: -w, --wait-complete
+      description: Wait for compaction to complete
+      default: 'false'
+  - name: create-partitioned-topic
+    description: Create a partitioned topic. A partitioned topic must be created before producers can publish to it.
+    argument: "{persistent|non-persistent}://tenant/namespace/topic"
+    options:
+    - flags: -p, --partitions
+      description: The number of partitions for the topic
+      default: 0
+  - name: delete-partitioned-topic
+    description: Delete a partitioned topic. This will also delete all the partitions of the topic if they exist.
+    argument: "{persistent|non-persistent}://tenant/namespace/topic"
+  - name: get-partitioned-topic-metadata
+    description: Get the partitioned topic metadata. If the topic is not created or is a non-partitioned topic, this will return an empty topic with zero partitions.
+    argument: "{persistent|non-persistent}://tenant/namespace/topic"
   - name: list
     description: Get the list of topics under a namespace
-    argument: property/cluster/namespace
+    argument: tenant/cluster/namespace
+  - name: list-in-bundle
+    description: Get a list of non-persistent topics present under a namespace bundle
+    argument: tenant/namespace
+    options:
+    - flags: -b, --bundle
+      description: The bundle range
+  - name: terminate
+    description: Terminate a topic (disallow further messages from being published on the topic)
+    argument: "{persistent|non-persistent}://tenant/namespace/topic"
   - name: permissions
     description: Get the permissions on a topic. Retrieve the effective permissions for a desination. These permissions are defined by the permissions set at the namespace level combined (union) with any eventual specific permissions set on the topic.
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: grant-permission
     description: Grant a new permission to a client role on a single topic
-    argument: persistent://property/cluster/namespace/topic
+    argument: "{persistent|non-persistent}://tenant/namespace/topic"
     options:
     - flags: --actions
       description: Actions to be granted (`produce` or `consume`)
@@ -496,47 +528,47 @@ commands:
       description: The client role to which permissions are to be granted
   - name: revoke-permission
     description: Revoke permissions to a client role on a single topic. If the permission was not set at the topic level, but rather at the namespace level, this operation will return an error (HTTP status code 412).
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: lookup
     description: Look up a topic from the current serving broker
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: bundle-range
     description: Get the namespace bundle which contains the given topic
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: delete
     description: Delete a topic. The topic cannot be deleted if there are any active subscriptions or producers connected to the topic.
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: unload
     description: Unload a topic
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: subscriptions
     description: Get the list of subscriptions on the topic
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: unsubscribe
     description: Delete a durable subscriber from a topic
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
     options:
     - flags: -s, --subscription
       description: The subscription to delete
   - name: stats
     description: Get the stats for the topic and its connected producers and consumers. All rates are computed over a 1-minute window and are relative to the last completed 1-minute period.
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: stats-internal
     description: Get the internal stats for the topic
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: info-internal
     description: Get the internal metadata info for the topic
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
   - name: partitioned-stats
     description: Get the stats for the partitioned topic and its connected producers and consumers. All rates are computed over a 1-minute window and are relative to the last completed 1-minute period.
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
     options:
     - flags: --per-partition
       description: Get per-partition stats
       default: 'false'
   - name: skip
-    description: SKip some messages for the subscription
-    argument: persistent://property/cluster/namespace/topic
+    description: Skip some messages for the subscription
+    argument: topic
     options:
     - flags: -n, --count
       description: The number of messages to skip
@@ -545,13 +577,13 @@ commands:
       description: The subscription on which to skip messages
   - name: skip-all
     description: Skip all the messages for the subscription
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
     options:
     - flags: -s, --subscription
       description: The subscription to clear
   - name: expire-messages
     description: Expire messages that are older than the given expiry time (in seconds) for the subscription.
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
     options:
     - flags: -t, --expireTime
       description: Expire messages older than the time (in seconds)
@@ -560,27 +592,14 @@ commands:
       description: The subscription to skip messages on
   - name: expire-messages-all-subscriptions
     description: Expire messages older than the given expiry time (in seconds) for all subscriptions
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
     options:
     - flags: -t, --expireTime
       description: Expire messages older than the time (in seconds)
       default: 0
-  - name: create-partitioned-topic
-    description: Create a partitioned topic
-    argument: persistent://property/cluster/namespace/topic
-    options:
-    - flags: -p, --partitions
-      description: The number of partitions for the topic
-      default: 0
-  - name: get-partitioned-topic-metadata
-    description: Get the partitioned topic metadata. If the topic is not created or is a non-partitioned topic, this will return an empty topic with zero partitions.
-    argument: persistent://property/cluster/namespace/topic
-  - name: delete-partitioned-topic
-    description: Delete a partitioned topic. This will also delete all the partitions of the topic if they exist.
-    argument: persistent://property/cluster/namespace/topic
   - name: peek-messages
     description: Peek some messages for the subscription.
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
     options:
     - flags: -n, --count
       description: The number of messages
@@ -589,72 +608,41 @@ commands:
       description: Subscription to get messages from
   - name: reset-cursor
     description: Reset position for subscription to closest to timestamp
-    argument: persistent://property/cluster/namespace/topic
+    argument: topic
     options:
     - flags: -s, --subscription
       description: Subscription to reset position on
     - flags: -t, --time
       description: "The time, in minutes, to reset back to (or minutes, hours, days, weeks, etc.). Examples: `100m`, `3h`, `2d`, `5w`."
-- name: non-persistent
-  description: Operations on persistent topics
-  subcommands:
-  - name: create-partitioned-topic
-    description: Create a partitioned topic. A partitioned non-persistent topic must be created before producers can publish to it.
-    argument: non-persistent://property/cluster/namespace/topic
-    options:
-    - flags: -p, --partitions
-      description: The number of partitions for the topic
-      default: 0
-  - name: lookup
-    description: Look up a non-persistent topic on the current serving broker
-    argument: non-persistent://property/cluster/namespace/topic
-  - name: stats
-    description: Get the stats for the topic and its connected producers and consumers. All rates are computed over a 1-minute window and are relative to the last completed 1-minute period.
-    argument: non-persistent://property/cluster/namespace/topic
-  - name: stats-internal
-    description: Get the internal stats for the topic
-    argument: non-persistent://property/cluster/namespace/topic
-  - name: get-partitioned-topic-metadata
-    description: Get the partitioned topic metadata. If the topic is not created or is a non-partitioned topic, this will return an empty topic with zero partitions.
-    argument: non-persistent://property/cluster/namespace/topic
-  - name: list
-    description: Get a list of non-persistent topics under a namespace
-    argument: property/cluster/namespace
-  - name: list-in-bundle
-    description: Get a list of non-persistent topics present under a namespace bundle
-    argument: property/cluster/namespace
-    options:
-    - flags: -b, --bundle
-      description: The bundle range
-- name: properties
-  description: Operations about properties
+- name: tenants
+  description: Operations for managing tenants
   subcommands:
   - name: list
-    description: List the existing properties
+    description: List the existing tenants
   - name: get
-    description: Gets the configuration of a property
-    argument: property-name
+    description: Gets the configuration of a tenant
+    argument: tenant-name
   - name: create
-    description: Creates a new property
-    argument: property-name
+    description: Creates a new tenant
+    argument: tenant-name
     options:
     - flags: -r, --admin-roles
       description: Comma-separated admin roles
     - flags: -c, --allowed-clusters
       description: Comma-separated allowed clusters
   - name: update
-    description: Updates a property
-    argument: property-name
+    description: Updates a tenant
+    argument: tenant-name
     options:
     - flags: -r, --admin-roles
       description: Comma-separated admin roles
     - flags: -c, --allowed-clusters
       description: Comma-separated allowed clusters
   - name: delete
-    description: Deletes an existing property
-    argument: property-name
+    description: Deletes an existing tenant
+    argument: tenant-name
 - name: resource-quotas
-  description: Operations about resource quotas
+  description: Operations for managing resource quotas
   subcommands:
   - name: get
     description: Get the resource quota for a specified namespace bundle, or default quota if no namespace/bundle is specified.
@@ -663,7 +651,7 @@ commands:
       description: A bundle of the form `{start-boundary}_{end_boundary}`. This must be specified together with `-n`/`--namespace`.
     - flags: -n, --namespace
       description: The namespace
-      argument: property/cluster/namespace
+      argument: tenant/namespace
   - name: set
     description: Set the resource quota for the specified namespace bundle, or default quota if no namespace/bundle is specified.
     options:
@@ -688,11 +676,11 @@ commands:
       description: Expected outgoing messages per second
       default: 0
     - flags: -n, --namespace
-      description: The namespace as `property/cluster/namespace`, for example `my-prop/my-cluster/my-ns`. Must be specified together with `-b`/`--bundle`.
+      description: The namespace as `tenant/namespace`, for example `my-tenant/my-ns`. Must be specified together with `-b`/`--bundle`.
   - name: reset-namespace-bundle-quota
     description: Reset the specifed namespace bundle's resource quota to a default value.
     options:
     - flags: -b, --bundle
       description: A bundle of the form `{start-boundary}_{end_boundary}`.
     - flags: -n, --namespace
-      description: The namespace as `property/cluster/namespace`, for example `my-prop/my-cluster/my-ns`.
+      description: The namespace as `tenant/namespace`, for example `my-tenant/my-ns`.
diff --git a/site/_data/cli/pulsar.yaml b/site/_data/cli/pulsar.yaml
index fee9b84dcc..dd252a79e2 100644
--- a/site/_data/cli/pulsar.yaml
+++ b/site/_data/cli/pulsar.yaml
@@ -61,7 +61,7 @@ commands:
   description: Run compaction against a Pulsar topic
   example: |
     pulsar compact-topic \
-      --topic persistent://sample/standalone/ns1/topic-to-compact
+      --topic topic-to-compact
   options:
   - flags: -t, --topic
     description: The Pulsar topic that you would like to compact
@@ -73,11 +73,11 @@ commands:
   options:
   - flags: -c, --conf
     description: Configuration file for the discovery service
-- name: global-zookeeper
-  description: Starts up global ZooKeeper
+- name: configuration-store
+  description: Starts up the Pulsar configuration store
   options:
   - flags: -c, --conf
-    description: Configuration file for global ZooKeeper
+    description: Configuration file for the configuration store
 - name: initialize-cluster-metadata
   description: One-time cluster metadata initialization
   options:
@@ -87,8 +87,8 @@ commands:
     description: The broker service URL for the new cluster with TLS encryption
   - flags: -c, --cluster
     description: Cluster name
-  - flags: -gzk, --global-zookeeper
-    description: The global ZooKeeper quorum connection string
+  - flags: --configuration-store
+    description: The configuration store quorum connection string
   - flags: -uw, --web-service-url
     description: The web service URL for the new cluster
   - flags: -tw, --web-service-url-tls
@@ -100,12 +100,12 @@ commands:
   example: |
     pulsar proxy \
       --zookeeper-servers zk-0,zk-1,zk2 \
-      --global-zookeeper-servers zk-0,zk-1,zk-2
+      --configuration-store zk-0,zk-1,zk-2
   options:
   - flags: -c, --config
     description: Path to a Pulsar proxy configuration file
-  - flags: -gzk, --global-zookeeper-servers
-    description: Global ZooKeeper connection string
+  - flags: --configuration-store
+    description: Configuration store connection string
   - flags: -zk, --zookeeper-servers
     description: Local ZooKeeper connection string
 - name: standalone
@@ -168,8 +168,8 @@ env_vars:
 - name: PULSAR_ZK_CONF
   description: Configuration file for zookeeper
   default: conf/zookeeper.conf
-- name: PULSAR_GLOBAL_ZK_CONF
-  description:  Configuration file for global zookeeper
+- name: PULSAR_CONFIGURATION_STORE_CONF
+  description:  Configuration file for the configuration store
   default: conf/global_zookeeper.conf
 - name: PULSAR_DISCOVERY_CONF
   description: Configuration file for discovery service
diff --git a/site/_data/config/global-zookeeper.yaml b/site/_data/config/global_zookeeper.yaml
similarity index 100%
rename from site/_data/config/global-zookeeper.yaml
rename to site/_data/config/global_zookeeper.yaml
diff --git a/site/_data/messages.yaml b/site/_data/messages.yaml
index 1996350d04..8c4a83643c 100644
--- a/site/_data/messages.yaml
+++ b/site/_data/messages.yaml
@@ -52,8 +52,6 @@ shared_mode_limitations:
 
     1. Message ordering is not guaranteed.
     2. You cannot use [cumulative acknowledgment](#acknowledgement) with shared mode.
-global_cluster: |
-  If you're running a multi-cluster Pulsar instance, you can manage instance-level
 superuser:
   type: warning
   content: Please note that this operation requires [superuser](../../admin/Authz#superusers) privileges.
diff --git a/site/_data/popovers.yaml b/site/_data/popovers.yaml
index f00197097b..f04df85db3 100644
--- a/site/_data/popovers.yaml
+++ b/site/_data/popovers.yaml
@@ -33,6 +33,10 @@ broker:
 cluster:
   q: What is a cluster?
   def: A set of Pulsar brokers and BookKeeper servers (aka bookies). Clusters can reside in different geographical regions and replicate messages to one another in a process called geo-replication.
+configuration-store:
+  q: What is the configuration store?
+  def: |
+    Pulsar's configuration store (previously known as global ZooKeeper) is a ZooKeeper quorum that is used for configuration-specific tasks. A multi-cluster Pulsar installation requires just one configuration store across all clusters.
 consumer:
   q: What is a consumer?
   def: A process that establishes a subscription to a Pulsar topic and processes messages published to that topic by producers.
@@ -51,9 +55,6 @@ geo-replication:
 global-topic:
   q: What is a global topic?
   def: A topic that belongs to a global, instance-wide namespace and thus isn't tied to any specific cluster.
-global-zookeeper:
-  q: What is global ZooKeeper?
-  def: "A ZooKeeper cluster that Pulsar uses for instance-wide, rather than cluster-specific, tasks."
 instance:
   q: What is a Pulsar instance?
   def: A group of Pulsar clusters that act together as a single unit.
@@ -116,4 +117,4 @@ unacknowledged:
 zookeeper:
   q: What is ZooKeeper?
   def: |
-    ZooKeeper is a service that Pulsar uses for coordination-related tasks.<br /><br />A Pulsar instance relies on both a local ZooKeeper for cluster-specific tasks and a global ZooKeeper for instance-wide tasks.
+    ZooKeeper is a service that Pulsar uses for coordination-related tasks.<br /><br />A Pulsar instance relies on both a local ZooKeeper for cluster-specific tasks and a ZooKeeper configuration store for instance-wide tasks.
diff --git a/site/_data/pulsar-functions.yaml b/site/_data/pulsar-functions.yaml
deleted file mode 100644
index bea3166141..0000000000
--- a/site/_data/pulsar-functions.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-description: |
-  A tool for deploying and managing Pulsar Functions.
-example: |
-  pulsar-functions localrun \
-    --function-config my-function.yaml
-commands:
-- name: localrun
-  description: Runs a Pulsar Function
-- name: create
-  description: Creates a new Pulsar Function
-- name: delete
-  description: Deletes an existing Pulsar Function
-- name: update
-  description: Updates an existing Pulsar Function
-- name: get
-  description: Returns information about an existing Pulsar Function
-- name: list
-  description: Lists all currently existing Pulsar Functions
-  options:
-  - flags: --namespace
-    description: The namespace of the Pulsar Functions you'd like to list
-  - flags: --tenant
-    description: The tenant of the Pulsar Functions you'd like to list (you must also specify a namespace using the `--namespace` flag)
-- name: getstatus
-  description: Checks on the status of the specified Pulsar Function
-  options:
-  - flags: --namespace
-    description: The name of the Pulsar Function whose status you'd like to check
-  - flags: --tenant
-    description: The tenant of the Pulsar Function whose status you'd like to check
-  - flags: --tenant
-- name: querystate
-  description: Displays the current state of the specified Pulsar Function, by key
-  options:
-  - flags: -k, --key
-    description: The key for the desired value
-  - flags: --name
-    description: The name of the Pulsar Function whose current state you'd like to query
-  - flags: --namespace
-    description: The namespace of the Pulsar Function whose current state you'd like to query
-  - flags: -u, --storage-service-url
-    description: The URL of the storage service
-  - flags: --tenant
-    description: The tenant of the Pulsar Function whose current state you'd like to query
-  - flags: -w, --watch
-    description: If set, watch for changes in the current state of the specified Pulsar Function (by the key set using `-k`/`--key`)
-    default: 'false'
-options:
-  - flags: --name
-    description: The name of the Pulsar Function
-  - flags: --function-classname
-    description: The Java class name of the Pulsar Function
-  - flags: --function-classpath
-    description: The Java classpath of the Pulsar Function
-  - flags: --source-topic
-    description: The topic from which the Pulsar Function consumes its input
-  - flags: --sink-topic
-    description: The topic to which the Pulsar Function publishes its output (if any)
-  - flags: --input-serde-classname
-    description: Input SerDe
-    default: org.apache.pulsar.functions.runtime.serde.Utf8StringSerDe
-  - flags: --output-serde-classname
-    description: Output SerDe
-    default: org.apache.pulsar.functions.runtime.serde.Utf8StringSerDe
-  - flags: --function-config
-    description: The path for the Pulsar Function's YAML configuration file
\ No newline at end of file
diff --git a/site/_data/sidebar.yaml b/site/_data/sidebar.yaml
index a553beed4f..8aa26dffcd 100644
--- a/site/_data/sidebar.yaml
+++ b/site/_data/sidebar.yaml
@@ -21,6 +21,8 @@ groups:
 - title: Getting started
   dir: getting-started
   docs:
+  - title: Pulsar 2.0
+    endpoint: pulsar-2.0
   - title: Run Pulsar locally
     endpoint: LocalCluster
   - title: Pulsar in Docker
@@ -104,8 +106,8 @@ groups:
     endpoint: overview
   - title: Clusters
     endpoint: clusters
-  - title: Properties
-    endpoint: properties
+  - title: Tenants
+    endpoint: tenants
   - title: Brokers
     endpoint: brokers
   - title: Namespaces
diff --git a/site/_includes/admonition.html b/site/_includes/admonition.html
index 280d09a0f7..a8b16bfed2 100644
--- a/site/_includes/admonition.html
+++ b/site/_includes/admonition.html
@@ -19,7 +19,7 @@
 
 -->
 <div class="admonition">
-  <div class="{% if include.type %}{{ include.type }}{% else %}info{% endif %}">
+  <div class="{% if include.type %}{{ include.type }}{% else %}info{% endif %}"{% if include.id %} id="{{ include.id }}"{% endif %}>
     {% if include.title %}<span class="admonition-title">{{ include.title | markdownify }}</span>{% endif %}
     {% if include.content %}{{ include.content | markdownify }}{% endif %}
   </div>
diff --git a/site/_includes/explanations/client-url.md b/site/_includes/explanations/client-url.md
index 3f76419944..eac18a7b42 100644
--- a/site/_includes/explanations/client-url.md
+++ b/site/_includes/explanations/client-url.md
@@ -38,17 +38,3 @@ If you're using [TLS](../../admin/Authz#tls-client-auth) authentication, the URL
 ```
 pulsar+ssl://pulsar.us-west.example.com:6651
 ```
-
-### Global vs. cluster-specific topics
-
-Pulsar {% popover topics %} can be either cluster specific or global. Cluster-specific topic URLs have this structure:
-
-{% include topic.html p="property" c="cluster" n="namespace" t="topic" %}
-
-If the topic that a client needs to publish to or consume from is specific to a cluster, the client will need to use the broker service URL that you assigned to that cluster when [initializing its metadata](../../deployment/InstanceSetup#cluster-metadata-initialization).
-
-If the topic is global, however, the URL for the topic will look like this:
-
-{% include topic.html p="property" c="global" n="namespace" t="topic" %}
-
-In that case, your client can use the broker service URL for *any* cluster in the {% popover instance %} and Pulsar's internal service discovery system will handle the rest.
diff --git a/site/_includes/explanations/deploying-zk.md b/site/_includes/explanations/deploying-zk.md
index 917f45efee..5444317a13 100644
--- a/site/_includes/explanations/deploying-zk.md
+++ b/site/_includes/explanations/deploying-zk.md
@@ -59,19 +59,17 @@ Once each server has been added to the `zookeeper.conf` configuration and has th
 $ bin/pulsar-daemon start zookeeper
 ```
 
-### Deploying global ZooKeeper
+### Deploying the configuration store {#configuration-store}
 
-The ZooKeeper cluster configured and started up in the section above is a *local* ZooKeeper cluster used to manage a single Pulsar {% popover cluster %}. In addition to a local cluster, however, a full Pulsar {% popover instance %} also requires a *global* ZooKeeper quorum for handling some instance-level configuration and coordination tasks.
+The ZooKeeper cluster configured and started up in the section above is a *local* ZooKeeper cluster used to manage a single Pulsar {% popover cluster %}. In addition to a local cluster, however, a full Pulsar {% popover instance %} also requires a {% popover configuration store %} for handling some instance-level configuration and coordination tasks.
 
-If you're deploying a [single-cluster](#single-cluster-pulsar-instance) instance, then you will not need a separate cluster for global ZooKeeper. If, however, you're deploying a [multi-cluster](#multi-cluster-pulsar-instance) instance, then you should stand up a separate ZooKeeper cluster for instance-level tasks.
-
-{% include message.html id="global_cluster" %}
+If you're deploying a [single-cluster](#single-cluster-pulsar-instance) instance, then you will not need a separate cluster for the configuration store. If, however, you're deploying a [multi-cluster](#multi-cluster-pulsar-instance) instance, then you should stand up a separate ZooKeeper cluster for configuration tasks.
 
 #### Single-cluster Pulsar instance
 
-If your Pulsar {% popover instance %} will consist of just one cluster, then you can deploy {% popover global ZooKeeper %} on the same machines as the local ZooKeeper quorum but running on different TCP ports.
+If your Pulsar {% popover instance %} will consist of just one cluster, then you can deploy a {% popover configuration store %} on the same machines as the local ZooKeeper quorum but running on different TCP ports.
 
-To deploy global ZooKeeper in a single-cluster instance, add the same ZooKeeper servers used by the local quorom to the configuration file in [`conf/global_zookeeper.conf`](../../reference/Configuration#global-zookeeper) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). Here's an example that uses port 2184 for a three-node ZooKeeper cluster:
+To deploy a ZooKeeper configuration store in a single-cluster instance, add the same ZooKeeper servers used by the local quorum to the configuration file in [`conf/global_zookeeper.conf`](../../reference/Configuration#configuration-store) using the same method for [local ZooKeeper](#local-zookeeper), but make sure to use a different port (2181 is the default for ZooKeeper). Here's an example that uses port 2184 for a three-node ZooKeeper cluster:
 
 ```properties
 clientPort=2184
diff --git a/site/_includes/explanations/install-package.md b/site/_includes/explanations/install-package.md
index f2f7d82110..bf6da15f18 100644
--- a/site/_includes/explanations/install-package.md
+++ b/site/_includes/explanations/install-package.md
@@ -56,6 +56,7 @@ Directory | Contains
 :---------|:--------
 `bin` | Pulsar's [command-line tools](../../reference/CliTools), such as [`pulsar`](../../reference/CliTools#pulsar) and [`pulsar-admin`](../../reference/CliTools#pulsar-admin)
 `conf` | Configuration files for Pulsar, including for [broker configuration](../../reference/Configuration#broker), [ZooKeeper configuration](../../reference/Configuration#zookeeper), and more
+`examples` | A Java JAR file containing example [Pulsar Functions](../../functions/overview)
 `lib` | The [JAR](https://en.wikipedia.org/wiki/JAR_(file_format)) files used by Pulsar
 `licenses` | License files, in `.txt` form, for various components of the Pulsar [codebase](../../project/Codebase)
 
@@ -64,4 +65,5 @@ These directories will be created once you begin running Pulsar:
 Directory | Contains
 :---------|:--------
 `data` | The data storage directory used by {% popover ZooKeeper %} and {% popover BookKeeper %}
+`instances` | Artifacts created for [Pulsar Functions](../../functions/overview)
 `logs` | Logs created by the installation
diff --git a/site/_includes/explanations/non-persistent-topics.md b/site/_includes/explanations/non-persistent-topics.md
index a94358e585..178bd8c844 100644
--- a/site/_includes/explanations/non-persistent-topics.md
+++ b/site/_includes/explanations/non-persistent-topics.md
@@ -25,4 +25,4 @@ Pulsar also, however, supports **non-persistent topics**, which are topics on wh
 
 Non-persistent topics have names of this form (note the `non-persistent` in the name):
 
-{% include topic.html type="non-persistent" p="property" c="cluster" n="namespace" t="topic" %}
\ No newline at end of file
+{% include topic.html type="non-persistent" ten="tenant" n="namespace" t="topic" %}
\ No newline at end of file
diff --git a/site/_includes/explanations/partitioned-topic-admin.md b/site/_includes/explanations/partitioned-topic-admin.md
index 5575e57b55..9915c2ffd3 100644
--- a/site/_includes/explanations/partitioned-topic-admin.md
+++ b/site/_includes/explanations/partitioned-topic-admin.md
@@ -23,7 +23,7 @@ You can use Pulsar's [admin API](../../admin-api/overview) to create and manage
 
 In all of the instructions and commands below, the topic name structure is:
 
-{% include topic.html p="property" c="cluster" n="namespace" t="topic" %}
+{% include topic.html ten="tenant" n="namespace" t="topic" %}
 
 ### Create
 
diff --git a/site/_includes/explanations/partitioned-topics.md b/site/_includes/explanations/partitioned-topics.md
index c0d4e210a2..0d362b64d4 100644
--- a/site/_includes/explanations/partitioned-topics.md
+++ b/site/_includes/explanations/partitioned-topics.md
@@ -25,13 +25,13 @@ Behind the scenes, a partitioned topic is actually implemented as N internal top
 
 The diagram below illustrates this:
 
-![Partitioned Topic](/img/pulsar_partitioned_topic.jpg)
+{% img /img/partitioning.png 70 %}
 
-Here, the topic **T1** has five partitions (**P0** through **P4**) split across three brokers. Because there are more partitions than brokers, two brokers handle two partitions a piece, while the third handles only one (again, Pulsar handles this distribution of partitions automatically).
+Here, the topic **Topic1** has five partitions (**P0** through **P4**) split across three brokers. Because there are more partitions than brokers, two brokers handle two partitions a piece, while the third handles only one (again, Pulsar handles this distribution of partitions automatically).
 
 Messages for this topic are broadcast to two {% popover consumers %}. The [routing mode](#routing-modes) determines both which broker handles each partition, while the [subscription mode](../../getting-started/ConceptsAndArchitecture#subscription-modes) determines which messages go to which consumers.
 
-Decisions about routing and subscription modes can be made separately in most cases. Throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
+Decisions about routing and subscription modes can be made separately in most cases. In general, throughput concerns should guide partitioning/routing decisions while subscription decisions should be guided by application semantics.
 
 There is no difference between partitioned topics and normal topics in terms of how subscription modes work, as partitioning only determines what happens between when a message is published by a {% popover producer %} and processed and {% popover acknowledged %} by a {% popover consumer %}.
 
diff --git a/site/_includes/explanations/properties-namespaces.md b/site/_includes/explanations/properties-namespaces.md
deleted file mode 100644
index e2260d87e7..0000000000
--- a/site/_includes/explanations/properties-namespaces.md
+++ /dev/null
@@ -1,42 +0,0 @@
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
-
--->
-
-Pulsar was designed from the ground up to be a {% popover multi-tenant %} system. In Pulsar, {% popover tenants %} are identified by [properties](#properties). Properties are the highest administrative unit within a Pulsar {% popover instance %}. Within properties
-
-### Properties
-
-To each property in a Pulsar instance you can assign:
-
-* An [authorization](../../admin/Authz#authorization) scheme
-* The set of {% popover clusters %} to which the property applies
-
-### Namespaces
-
-{% popover Properties %} and {% popover namespaces %} are two key concepts of Pulsar to support {% popover multi-tenancy %}.
-
-* A **property** identifies a {% popover tenant %}. Pulsar is provisioned for a specified property with appropriate capacity allocated to the property.
-* A **namespace** is the administrative unit nomenclature within a property. The configuration policies set on a namespace apply to all the topics created in such namespace. A property may create multiple namespaces via self-administration using REST API and CLI tools. For instance, a property with different applications can create a separate namespace for each application.
-
-Names for topics in the same namespace will look like this:
-
-{% include topic.html p="my-property" c="us-w" n="my-app1" t="my-topic-1" %}
-{% include topic.html p="my-property" c="us-w" n="my-app1" t="my-topic-2" %}
-{% include topic.html p="my-property" c="us-w" n="my-app1" t="my-topic-3" %}
diff --git a/site/_includes/explanations/service-discovery-setup.md b/site/_includes/explanations/service-discovery-setup.md
index 9284c1fe49..4e4ce9aee7 100644
--- a/site/_includes/explanations/service-discovery-setup.md
+++ b/site/_includes/explanations/service-discovery-setup.md
@@ -31,14 +31,14 @@ Many large-scale deployment systems, such as [Kubernetes](../../deployment/Kuber
 
 The service discovery mechanism included with Pulsar maintains a list of active brokers, stored in {% popover ZooKeeper %}, and supports lookup using HTTP and also Pulsar's [binary protocol](../../project/BinaryProtocol).
 
-To get started setting up Pulsar's built-in service discovery, you need to change a few parameters in the [`conf/discovery.conf`](../../reference/Configuration#service-discovery) configuration file. Set the [`zookeeperServers`](../../reference/Configuration#service-discovery-zookeeperServers) parameter to the global ZooKeeper quorum connection string and the [`globalZookeeperServers`](../../reference/Configuration#service-discovery-globalZookeeperServers)
+To get started setting up Pulsar's built-in service discovery, you need to change a few parameters in the [`conf/discovery.conf`](../../reference/Configuration#service-discovery) configuration file. Set the [`zookeeperServers`](../../reference/Configuration#service-discovery-zookeeperServers) parameter to the cluster's ZooKeeper quorum connection string and the [`configurationStoreServers`](../../reference/Configuration#service-discovery-configurationStoreServers) setting to the {% popover configuration store %} quorum connection string.
 
 ```properties
 # Zookeeper quorum connection string
 zookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
 
-# Global zookeeper quorum connection string
-globalZookeeperServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
+# Global configuration store connection string
+configurationStoreServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
 ```
 
 To start the discovery service:
diff --git a/site/_includes/explanations/tenants-namespaces.md b/site/_includes/explanations/tenants-namespaces.md
new file mode 100644
index 0000000000..13841a70bf
--- /dev/null
+++ b/site/_includes/explanations/tenants-namespaces.md
@@ -0,0 +1,42 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+
+Pulsar was designed from the ground up to be a {% popover multi-tenant %} system. In Pulsar, {% popover tenants %} are the highest administrative unit within a Pulsar {% popover instance %}.
+
+### Tenants
+
+To each tenant in a Pulsar instance you can assign:
+
+* An [authorization](../../admin/Authz#authorization) scheme
+* The set of {% popover clusters %} to which the tenant's configuration applies
+
+### Namespaces
+
+{% popover Tenants %} and {% popover namespaces %} are two key concepts of Pulsar to support {% popover multi-tenancy %}.
+
+* Pulsar is provisioned for specified {% popover tenants %} with appropriate capacity allocated to the tenant.
+* A {% popover namespace %} is the administrative unit nomenclature within a tenant. The configuration policies set on a namespace apply to all the topics created in that namespace. A tenant may create multiple namespaces via self-administration using the REST API and the [`pulsar-admin`](../../reference/CliTools#pulsar-admin) CLI tool. For instance, a tenant with different applications can create a separate namespace for each application.
+
+Names for topics in the same namespace will look like this:
+
+{% include topic.html ten="my-tenant" n="my-app1" t="my-topic-1" %}
+{% include topic.html ten="my-tenant" n="my-app1" t="my-topic-2" %}
+{% include topic.html ten="my-tenant" n="my-app1" t="my-topic-3" %}
diff --git a/site/_includes/figure.html b/site/_includes/figure.html
new file mode 100644
index 0000000000..cfda529ec7
--- /dev/null
+++ b/site/_includes/figure.html
@@ -0,0 +1,21 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+<img src="{{ include.src }}"{% if include.width %} width="{{ include.width }}%"{% endif %}{% if include.alt %} alt="{{ include.alt }}"{% endif %}>
\ No newline at end of file
diff --git a/site/_includes/pulsar-2.html b/site/_includes/pulsar-2.html
new file mode 100644
index 0000000000..354780bb56
--- /dev/null
+++ b/site/_includes/pulsar-2.html
@@ -0,0 +1,23 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+
+{% include admonition.html type="success" title="Pulsar version 2.0" content="
+The documentation that you're reading is for the 2.0 release of Apache Pulsar. For more information on Pulsar 2.0, see [this guide](../../getting-started/Pulsar-2.0)." %}
\ No newline at end of file
diff --git a/site/_includes/topic.html b/site/_includes/topic.html
index 00d89ba06d..c69286f06c 100644
--- a/site/_includes/topic.html
+++ b/site/_includes/topic.html
@@ -19,5 +19,5 @@
 
 -->
 <section class="topic">
-  {% if include.type %}{{ include.type }}{% else %}persistent{% endif %}://<span class="property">{{ include.p }}</span>/<span class="cluster">{{ include.c }}</span>/<span class="namespace">{{ include.n }}</span>/<span class="t">{{ include.t }}</span>
+  {% if include.type %}{{ include.type }}{% else %}persistent{% endif %}://{% if include.ten %}<span class="property">{{ include.ten }}</span>/{% endif %}{% if include.c %}<span class="cluster">{{ include.c }}</span>/{% endif %}{% if include.n %}<span class="namespace">{{ include.n }}</span>/{% endif %}{% if include.t %}<span class="t">{{ include.t }}</span>{% endif %}
 </section>
diff --git a/site/_layouts/docs.html b/site/_layouts/docs.html
index 6420fbaf47..be7a1dfc19 100644
--- a/site/_layouts/docs.html
+++ b/site/_layouts/docs.html
@@ -46,9 +46,9 @@ <h1 class="docs-title">
           {% endfor %}
         </section>
 
-        {% include version-warning.html %}
+        {% unless page.hide_pulsar2_notification %}{% include pulsar-2.html %}{% endunless %}
 
-        <!-- <hr class="hr"> -->
+        <hr class="hr">
       </section>
 
       <section class="content">
diff --git a/site/_sass/_docs.scss b/site/_sass/_docs.scss
index 8d55b5c465..f5ee27867e 100644
--- a/site/_sass/_docs.scss
+++ b/site/_sass/_docs.scss
@@ -112,9 +112,10 @@
       .topic, .fqfn {
         color: $sx-light-gray;
         background-color: $black;
-        font-size: $code-font-size;
+        font-size: $code-font-size * 1.25;
         font-family: $font-family-monospace;
-        padding: 10px 0 10px 20px;
+        font-weight: 700;
+        padding: .5rem 0 .5rem 1rem;
         border-radius: 0;
 
         .property, .tenant { color: $sx-olive; }
@@ -122,8 +123,10 @@
         .namespace { color: $sx-7; }
         .t { color: $sx-magenta; }
 
-        & + p {
-          margin-top: 20px;
+        & ~ {
+          p, h2, h3, h4, h5 {
+            margin-top: 20px;
+          }
         }
       }
 
diff --git a/site/docs/latest/admin-api/partitioned-topics.md b/site/docs/latest/admin-api/partitioned-topics.md
index 0d79cbfa37..87444daaa9 100644
--- a/site/docs/latest/admin-api/partitioned-topics.md
+++ b/site/docs/latest/admin-api/partitioned-topics.md
@@ -27,7 +27,7 @@ You can use Pulsar's [admin API](../../admin-api/overview) to create and manage
 
 In all of the instructions and commands below, the topic name structure is:
 
-{% include topic.html p="property" c="cluster" n="namespace" t="topic" %}
+{% include topic.html ten="tenant" n="namespace" t="topic" %}
 
 ## Partitioned topics resources
 
diff --git a/site/docs/latest/admin-api/persistent-topics.md b/site/docs/latest/admin-api/persistent-topics.md
index 1b2b403e79..20c0fcf5ee 100644
--- a/site/docs/latest/admin-api/persistent-topics.md
+++ b/site/docs/latest/admin-api/persistent-topics.md
@@ -27,7 +27,7 @@ Persistent helps to access topic which is a logical endpoint for publishing and
 
 In all of the instructions and commands below, the topic name structure is:
 
-{% include topic.html p="property" c="cluster" n="namespace" t="topic" %}
+{% include topic.html ten="tenant" n="namespace" t="topic" %}
 
 ## Persistent topics resources
 
diff --git a/site/docs/latest/admin-api/properties.md b/site/docs/latest/admin-api/properties.md
deleted file mode 100644
index 7c1dde1cf4..0000000000
--- a/site/docs/latest/admin-api/properties.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: Managing properties
----
-
-<!--
-
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
-
--->
-
-Properties, like namespaces, can be managed using the [admin API](../../admin-api/overview). There are currently two configurable aspects of properties:
-
-* Admin roles
-* Allowed clusters
-
-
-## Properties resources
-
-### List
-
-#### pulsar-admin
-
-You can list all of the properties associated with an {% popover instance %} using the [`list`](../../reference/CliTools#pulsar-admin-properties-list) subcommand:
-
-```shell
-$ pulsar-admin properties list
-```
-
-That will return a simple list, like this:
-
-```
-my-property-1
-my-property-2
-```
-
-### Create
-
-#### pulsar-admin
-
-You can create a new property using the [`create`](../../reference/CliTools#pulsar-admin-properties-create) subcommand:
-
-```shell
-$ pulsar-admin properties create my-property
-```
-
-When creating a property, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
-
-```shell
-$ pulsar-admin properties create my-property \
-  --admin-roles role1,role2,role3
-
-$ pulsar-admin properties create my-property \
-  -r role1
-```
-
-### Get configuration
-
-#### pulsar-admin
-
-You can see a property's configuration as a JSON object using the [`get`](../../reference/CliTools#pulsar-admin-properties-get) subcommand and specifying the name of the property:
-
-```shell
-$ pulsar-admin properties get my-property
-{
-  "adminRoles": [
-    "admin1",
-    "admin2"
-  ],
-  "allowedClusters": [
-    "cl1",
-    "cl2"
-  ]
-}
-```
-
-### Delete
-
-#### pulsar-adnin
-
-You can delete a property using the [`delete`](../../reference/CliTools#pulsar-admin-properties-delete) subcommand and specifying the property name:
-
-```shell
-$ pulsar-admin properties delete my-property
-```
-
-### Updating
-
-#### pulsar-admin
-
-You can update a property's configuration using the [`update`](../../reference/CliTools#pulsar-admin-properties-update) subcommand
diff --git a/site/docs/latest/admin-api/tenants.md b/site/docs/latest/admin-api/tenants.md
new file mode 100644
index 0000000000..a0d90d7beb
--- /dev/null
+++ b/site/docs/latest/admin-api/tenants.md
@@ -0,0 +1,104 @@
+---
+title: Managing tenants
+---
+
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+
+Tenants, like namespaces, can be managed using the [admin API](../../admin-api/overview). There are currently two configurable aspects of tenants:
+
+* Admin roles
+* Allowed clusters
+
+## Tenant resources
+
+### List
+
+#### pulsar-admin
+
+You can list all of the tenants associated with an {% popover instance %} using the [`list`](../../reference/CliTools#pulsar-admin-tenants-list) subcommand:
+
+```shell
+$ pulsar-admin tenants list
+```
+
+That will return a simple list, like this:
+
+```
+my-tenant-1
+my-tenant-2
+```
+
+### Create
+
+#### pulsar-admin
+
+You can create a new tenant using the [`create`](../../reference/CliTools#pulsar-admin-tenants-create) subcommand:
+
+```shell
+$ pulsar-admin tenants create my-tenant
+```
+
+When creating a tenant, you can assign admin roles using the `-r`/`--admin-roles` flag. You can specify multiple roles as a comma-separated list. Here are some examples:
+
+```shell
+$ pulsar-admin tenants create my-tenant \
+  --admin-roles role1,role2,role3
+
+$ pulsar-admin tenants create my-tenant \
+  -r role1
+```
+
+### Get configuration
+
+#### pulsar-admin
+
+You can see a tenant's configuration as a JSON object using the [`get`](../../reference/CliTools#pulsar-admin-tenants-get) subcommand and specifying the name of the tenant:
+
+```shell
+$ pulsar-admin tenants get my-tenant
+{
+  "adminRoles": [
+    "admin1",
+    "admin2"
+  ],
+  "allowedClusters": [
+    "cl1",
+    "cl2"
+  ]
+}
+```
+
+### Delete
+
+#### pulsar-admin
+
+You can delete a tenant using the [`delete`](../../reference/CliTools#pulsar-admin-tenants-delete) subcommand and specifying the tenant name:
+
+```shell
+$ pulsar-admin tenants delete my-tenant
+```
+
+### Updating
+
+#### pulsar-admin
+
+You can update a tenant's configuration using the [`update`](../../reference/CliTools#pulsar-admin-tenants-update) subcommand
diff --git a/site/docs/latest/admin/Authz.md b/site/docs/latest/admin/Authz.md
index 9089c37268..d33e83ea25 100644
--- a/site/docs/latest/admin/Authz.md
+++ b/site/docs/latest/admin/Authz.md
@@ -271,7 +271,7 @@ A client that successfully identified itself as having the role `my-admin-role`
 
 The structure of topic names in Pulsar reflects the hierarchy between properties, clusters, and [namespaces](#managing-namespaces):
 
-{% include topic.html p="property" c="cluster" n="namespace" t="topic" %}
+{% include topic.html ten="tenant" n="namespace" t="topic" %}
 
 ## Managing permissions
 
diff --git a/site/docs/latest/admin/Proxy.md b/site/docs/latest/admin/Proxy.md
index 07f1bc04d4..628bd79c14 100644
--- a/site/docs/latest/admin/Proxy.md
+++ b/site/docs/latest/admin/Proxy.md
@@ -27,7 +27,7 @@ The [Pulsar proxy](../../getting-started/ConceptsAndArchitecture#pulsar-proxy) i
 
 ## Running the proxy
 
-In order to run the Pulsar proxy, you need to have both a local and global [ZooKeeper](https://zookeeper.apache.org) quorum set up for use by your Pulsar cluster. For instructions, see [this document](../../deployment/cluster). Once you have ZooKeeper set up and have connection strings for both ZooKeeper quorums, you can use the [`proxy`](../../reference/CliTools#pulsar-proxy) command of the [`pulsar`](../../reference/CliTools#pulsar) CLI tool to start up the proxy (preferably on its own machine or in its own VM):
+In order to run the Pulsar proxy, you need to have both a local [ZooKeeper](https://zookeeper.apache.org) and {% popover configuration store %} quorum set up for use by your Pulsar cluster. For instructions, see [this document](../../deployment/cluster). Once you have ZooKeeper set up and have connection strings for both ZooKeeper quorums, you can use the [`proxy`](../../reference/CliTools#pulsar-proxy) command of the [`pulsar`](../../reference/CliTools#pulsar) CLI tool to start up the proxy (preferably on its own machine or in its own VM):
 
 To start the proxy:
 
diff --git a/site/docs/latest/admin/ZooKeeperBookKeeper.md b/site/docs/latest/admin/ZooKeeperBookKeeper.md
index 25204eeda8..1d673599d0 100644
--- a/site/docs/latest/admin/ZooKeeperBookKeeper.md
+++ b/site/docs/latest/admin/ZooKeeperBookKeeper.md
@@ -176,4 +176,4 @@ This diagram illustrates the role of ZooKeeper and BookKeeper in a Pulsar cluste
 
 ![ZooKeeper and BookKeeper](/img/pulsar_system_architecture.png)
 
-Each Pulsar {% popover cluster %} consists of one or more message {% popover brokers %}. Each broker relies on an ensemble of {% popover bookies %} 
+Each Pulsar {% popover cluster %} consists of one or more message {% popover brokers %}. Each broker relies on an ensemble of {% popover bookies %}.
diff --git a/site/docs/latest/clients/Java.md b/site/docs/latest/clients/Java.md
index 59b00164a5..c0ca3c648b 100644
--- a/site/docs/latest/clients/Java.md
+++ b/site/docs/latest/clients/Java.md
@@ -24,9 +24,7 @@ tags: [client, java]
 
 -->
 
-The Pulsar Java client can be used both to create Java {% popover producers %} and {% popover consumers %} of messages but also to perform [administrative tasks](../../admin-api/overview).
-
-The current version of the Java client is **{{ site.current_version }}**.
+The Pulsar Java client can be used both to create Java {% popover producers %}, {% popover consumers %}, and [readers](#readers) of messages and to perform [administrative tasks](../../admin-api/overview). The current version of the Java client is **{{ site.current_version }}**.
 
 Javadoc for the Pulsar client is divided up into two domains, by package:
 
@@ -100,7 +98,7 @@ In Pulsar, {% popover producers %} write {% popover messages %} to {% popover to
 ```java
 String topic = "persistent://sample/standalone/ns1/my-topic";
 
-Producer producer = client.newProducer()
+Producer<byte[]> producer = client.newProducer()
         .topic(topic)
         .create();
 ```
@@ -108,16 +106,33 @@ Producer producer = client.newProducer()
 You can then send messages to the broker and topic you specified:
 
 ```java
+import org.apache.pulsar.client.api.MessageBuilder;
+
+import java.util.stream.IntStream;
+
+MessageBuilder<byte[]> msgBuilder = MessageBuilder.create();
+
 // Publish 10 messages to the topic
-for (int i = 0; i < 10; i++) {
-    Message<byte[]> msg = MessageBuilder.create()
-            .setContent(String.format("Message number %d", i).getBytes())
-            .build();
-    producer.send(msg);
-}
+IntStream.range(1, 11).forEach(i -> {
+    msgBuilder.setContent(String.format("Message number %d", i).getBytes());
+
+    try {
+        producer.send(msgBuilder.build());
+    } catch (PulsarClientException e) {
+        e.printStackTrace();
+    }
+});
 ```
 
-{% include admonition.html type='warning' content="
+By default, producers produce messages that consist of byte arrays. You can produce different types, however, by specifying a message [schema](#schemas).
+
+```java
+Producer<String> stringProducer = client.newProducer(new StringSchema())
+        .topic(topic)
+        .create();
+```
+
+{% include admonition.html type='warning' content='
 You should always make sure to close your producers, consumers, and clients when they are no longer needed:
 
 ```java
@@ -126,22 +141,19 @@ consumer.close();
 client.close();
 ```
 
-Closer operations can also be asynchronous:
+Close operations can also be asynchronous:
 
 ```java
-producer.asyncClose();
-consumer.asyncClose();
-clioent.asyncClose();
+producer.closeAsync().thenRun(() -> System.out.println("Producer closed"));
 ```
-" %}
-
+' %}
 
 ### Configuring producers
 
 If you instantiate a `Producer` object specifying only a topic name, as in the example above, the producer will use the default configuration. To use a non-default configuration, there's a variety of configurable parameters that you can set. For a full listing, see the Javadoc for the {% javadoc ProducerBuilder client org.apache.pulsar.client.api.ProducerBuilder %} class. Here's an example:
 
 ```java
-Producer producer = client.newProducer()
+Producer<byte[]> producer = client.newProducer()
         .topic(topic)
         .enableBatching(true)
         .sendTimeout(10, TimeUnit.SECONDS)
@@ -155,15 +167,18 @@ When using {% popover partitioned topics %}, you can specify the routing mode wh
 
 ### Async send
 
-You can publish messages [asynchronously](../../getting-started/ConceptsAndArchitecture#send-modes) using the Java client. With async send, the producer will put the message in a blocking queue and return immediately. The client library will then send the message to the {% popover broker %} in the background. If the queue is full (max size configurable), the producer could be blocked or fail immediately when calling the API, depending on arguments passed to the producer.
+You can also publish messages [asynchronously](../../getting-started/ConceptsAndArchitecture#send-modes) using the Java client. With async send, the producer will put the message in a blocking queue and return immediately. The client library will then send the message to the {% popover broker %} in the background. If the queue is full (max size configurable), the producer could be blocked or fail immediately when calling the API, depending on arguments passed to the producer.
 
 Here's an example async send operation:
 
 ```java
 CompletableFuture<MessageId> future = producer.sendAsync("my-async-message".getBytes());
+future.thenAccept(msgId -> {
+        System.out.printf("Message with ID %s successfully sent", new String(msgId.toByteArray()));
+});
 ```
 
-Async send operations return a {% javadoc MessageId client org.apache.pulsar.client.api.MessageId %} wrapped in a [`CompletableFuture`](http://www.baeldung.com/java-completablefuture).
+As you can see from the example above, async send operations return a {% javadoc MessageId client org.apache.pulsar.client.api.MessageId %} wrapped in a [`CompletableFuture`](http://www.baeldung.com/java-completablefuture).
 
 ## Consumers
 
@@ -172,17 +187,16 @@ In Pulsar, {% popover consumers %} subscribe to {% popover topics %} and handle
 Once you've instantiated a {% javadoc PulsarClient client org.apache.pulsar.client.api.PulsarClient %} object, you can create a {% javadoc Consumer client org.apache.pulsar.client.api.Consumer %} by specifying a {% popover topic %} and a [subscription](../../getting-started/ConceptsAndArchitecture#subscription-modes).
 
 ```java
-String topic = "persistent://sample/standalone/ns1/my-topic"; // from above
-String subscription = "my-subscription";
 Consumer consumer = client.newConsumer()
-        .subscriptionName("my-subscription-1")
+        .topic("my-topic")
+        .subscriptionName("my-subscription")
         .subscribe();
 ```
 
 The `subscribe` method will automatically subscribe the consumer to the specified topic and subscription. One way to make the consumer listen on the topic is to set up a `while` loop. In this example loop, the consumer listens for messages, prints the contents of any message that's received, and then {% popover acknowledges %} that the message has been processed:
 
 ```java
-while (true) {
+do {
   // Wait for a message
   Message msg = consumer.receive();
 
@@ -190,7 +204,7 @@ while (true) {
 
   // Acknowledge the message so that it can be deleted by the message broker
   consumer.acknowledge(msg);
-}
+} while (true);
 ```
 
 ### Configuring consumers
@@ -201,8 +215,8 @@ Here's an example configuration:
 
 ```java
 Consumer consumer = client.newConsumer()
-        .topic(topic)
-        .subscriptionName(subscription)
+        .topic("my-topic")
+        .subscriptionName("my-subscription")
         .ackTimeout(10, TimeUnit.SECONDS)
         .subscriptionType(SubscriptionType.Exclusive)
         .subscribe();
@@ -234,18 +248,18 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.regex.Pattern;
 
-ConsumerBuilder bldr = pulsarClient.newConsumer()
+ConsumerBuilder consumerBuilder = pulsarClient.newConsumer()
         .subscriptionName(subscription);
 
 // Subscribe to all topics in a namespace
 Pattern allTopicsInNamespace = Pattern.compile("persistent://sample/standalone/ns1/.*");
-Consumer allTopicsConsumer = bldr
+Consumer allTopicsConsumer = consumerBuilder
         .topicsPattern(allTopicsInNamespace)
         .subscribe();
 
 // Subscribe to a subsets of topics in a namespace, based on regex
 Pattern someTopicsInNamespace = Pattern.compile("persistent://sample/standalone/ns1/foo.*");
-Consumer allTopicsConsumer = bldr
+Consumer allTopicsConsumer = consumerBuilder
         .topicsPattern(someTopicsInNamespace)
         .subscribe();
 ```
@@ -259,12 +273,12 @@ List<String> topics = Arrays.asList(
         "persistent://sample/standalone/ns3/topic-3"
 );
 
-Consumer multiTopicConsumer = bldr
+Consumer multiTopicConsumer = consumerBuilder
         .topics(topics)
         .subscribe();
 
 // Alternatively:
-Consumer multiTopicConsumer = bldr
+Consumer multiTopicConsumer = consumerBuilder
         .topics(
             "persistent://sample/standalone/ns1/topic-1",
             "persistent://sample/standalone/ns2/topic-2",
@@ -277,12 +291,38 @@ You can also subscribe to multiple topics asynchronously using the `subscribeAsy
 
 ```java
 Pattern allTopicsInNamespace = Pattern.compile("persistent://sample/standalone/ns1/.*");
-CompletableFuture<Consumer> consumer = bldr
+consumerBuilder
         .topics(topics)
-        .subscribeAsync();
+        .subscribeAsync()
+        .thenAccept(consumer -> {
+            do {
+                try {
+                    Message msg = consumer.receive();
+                    // Do something with the received message
+                } catch (PulsarClientException e) {
+                    e.printStackTrace();
+                }
+            } while (true);
+        });
 ```
 
-## Reader interface
+## Message schemas {#schemas}
+
+In Pulsar, all message data consists of byte arrays. Message **schemas** enable you to use other types of data when constructing and handling messages (from simple types like strings to more complex, application-specific types). If you construct, say, a [producer](#producers) without specifying a schema, then the producer can only produce messages of type `byte[]`. Here's an example:
+
+```java
+Producer producer = client.newProducer()
+        .topic(topic)
+        .create();
+```
+
+The producer above is equivalent to a `Producer<byte[]>` (in fact, you should always explicitly specify the type). If you'd like to produce messages of a different type, you can specify a schema when creating the producer, as in the `StringSchema` example above.
+
+
+
+The same schema-based logic applies to [consumers](#consumers) and [readers](#readers).
+
+## Reader interface {#readers}
 
 With the [reader interface](../../getting-started/ConceptsAndArchitecture#reader-interface), Pulsar clients can "manually position" themselves within a topic, reading all messages from a specified message onward. The Pulsar API for Java enables you to create  {% javadoc Reader client org.apache.pulsar.client.api.Reader %} objects by specifying a {% popover topic %}, a {% javadoc MessageId client org.apache.pulsar.client.api.MessageId %}, and {% javadoc ReaderConfiguration client org.apache.pulsar.client.api.ReaderConfiguration %}.
 
diff --git a/site/docs/latest/deployment/cluster.md b/site/docs/latest/deployment/cluster.md
index 773cfa28a6..b7fa3fc993 100644
--- a/site/docs/latest/deployment/cluster.md
+++ b/site/docs/latest/deployment/cluster.md
@@ -155,7 +155,7 @@ Flag | Description
 :----|:-----------
 `--cluster` | A name for the cluster
 `--zookeeper` | A "local" ZooKeeper connection string for the cluster. This connection string only needs to include *one* machine in the ZooKeeper cluster.
-`--configuration-store` | The configuration store (ZooKeeper) where the configuration policies for all tenants and namespaces across all clusters will be stored. As with the `--zookeeper` flag, this connection string only needs to include *one* machine in the ZooKeeper cluster.
+`--configuration-store` | The {% popover configuration store %} connection string for the entire instance. As with the `--zookeeper` flag, this connection string only needs to include *one* machine in the ZooKeeper cluster.
 `--web-service-url` | The web service URL for the cluster, plus a port. This URL should be a standard DNS name. The default port is 8080 (we don't recommend using a different port).
 `--web-service-url-tls` | If you're using [TLS](../../../admin/Authz#tls-client-auth), you'll also need to specify a TLS web service URL for the cluster. The default port is 8443 (we don't recommend using a different port).
 `--broker-service-url` | A broker service URL enabling interaction with the {% popover brokers %} in the cluster. This URL should use the same DNS name as the web service URL but should use the `pulsar` scheme instead. The default port is 6650 (we don't recommend using a different port).
@@ -199,13 +199,11 @@ This will create an ephemeral BookKeeper {% popover ledger %} on the local booki
 
 Pulsar {% popover brokers %} are the last thing you need to deploy in your Pulsar cluster. Brokers handle Pulsar messages and provide Pulsar's administrative interface. We recommend running **3 brokers**, one for each machine that's already running a BookKeeper bookie.
 
-The most important element of broker configuration is ensuring that that each broker is aware of the ZooKeeper cluster that you've deployed. Make sure that the [`zookeeperServers`](../../../reference/Configuration#broker-zookeeperServers) and [`globalZookeeperServers`](../../../reference/Configuration#broker-globalZookeeperServers) parameters.
-In this case, since we only have 1 cluster and no global ZooKeeper setup, the `globalZookeeperServers`
-will point to the same `zookeeperServers`.
+The most important element of broker configuration is ensuring that each broker is aware of the ZooKeeper cluster that you've deployed. Make sure that the [`zookeeperServers`](../../../reference/Configuration#broker-zookeeperServers) and [`configurationStoreServers`](../../../reference/Configuration#broker-configurationStoreServers) parameters are set correctly. In this case, since we only have 1 cluster and no configuration store setup, the `configurationStoreServers` will point to the same `zookeeperServers`.
 
 ```properties
 zookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
-globalZookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
+configurationStoreServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
 ```
 
 You also need to specify the cluster name (matching the name that you provided when [initializing the cluster's metadata](#initializing-cluster-metadata):
diff --git a/site/docs/latest/deployment/instance.md b/site/docs/latest/deployment/instance.md
index 6a6b34ba3e..9726587c4b 100644
--- a/site/docs/latest/deployment/instance.md
+++ b/site/docs/latest/deployment/instance.md
@@ -29,7 +29,7 @@ tags: [admin, deployment, instance, bare metal]
 
 A Pulsar *instance* consists of multiple Pulsar {% popover clusters %} working in unison. Clusters can be distributed across data centers or geographical regions and can replicate amongst themselves using [geo-replication](../../admin/GeoReplication). Deploying a multi-cluster Pulsar instance involves the following basic steps:
 
-* Deploying two separate [ZooKeeper](#deploying-zookeeper) quorums: a [local](#deploying-local-zookeeper) quorum for each cluster in the instance and a [global](#deploying-global-zookeeper) quorum for instance-wide tasks
+* Deploying two separate [ZooKeeper](#deploying-zookeeper) quorums: a [local](#deploying-local-zookeeper) quorum for each cluster in the instance and a [configuration store](#configuration-store) quorum for instance-wide tasks
 * Initializing [cluster metadata](#cluster-metadata-initialization) for each cluster
 * Deploying a [BookKeeper cluster](#deploying-bookkeeper) of {% popover bookies %} in each Pulsar cluster
 * Deploying [brokers](#deploying-brokers) in each Pulsar cluster
@@ -48,7 +48,7 @@ This guide shows you how to deploy Pulsar in production in a non-Kubernetes. If
 
 ## Cluster metadata initialization
 
-Once you've set up local and global ZooKeeper for your instance, there is some metadata that needs to be written to ZooKeeper for each cluster in your instance. It only needs to be written once.
+Once you've set up the cluster-specific ZooKeeper and {% popover configuration store %} quorums for your instance, there is some metadata that needs to be written to ZooKeeper for each cluster in your instance. **It only needs to be written once**.
 
 You can initialize this metadata using the [`initialize-cluster-metadata`](../../reference/CliTools#pulsar-initialize-cluster-metadata) command of the [`pulsar`](../../reference/CliTools#pulsar) CLI tool. Here's an example:
 
@@ -56,7 +56,7 @@ You can initialize this metadata using the [`initialize-cluster-metadata`](../..
 $ bin/pulsar initialize-cluster-metadata \
   --cluster us-west \
   --zookeeper zk1.us-west.example.com:2181 \
-  --global-zookeeper zk1.us-west.example.com:2184 \
+  --configuration-store zk1.us-west.example.com:2184 \
   --web-service-url http://pulsar.us-west.example.com:8080/ \
   --web-service-url-tls https://pulsar.us-west.example.com:8443/ \
   --broker-service-url pulsar://pulsar.us-west.example.com:6650/ \
@@ -67,14 +67,10 @@ As you can see from the example above, the following needs to be specified:
 
 * The name of the cluster
 * The local ZooKeeper connection string for the cluster
-* The global ZooKeeper connection string for the entire instance
+* The {% popover configuration store %} connection string for the entire instance
 * The web service URL for the cluster
 * A broker service URL enabling interaction with the {% popover brokers %} in the cluster
 
-{% include admonition.html type="info" title="Global cluster" content='
-In each Pulsar instance, there is a `global` cluster that you can administer just like other clusters. The `global` cluster enables you to do things like create global topics.
-' %}
-
 If you're using [TLS](../../admin/Authz#tls-client-auth), you'll also need to specify a TLS web service URL for the cluster as well as a TLS broker service URL for the brokers in the cluster.
 
 Make sure to run `initialize-cluster-metadata` for each cluster in your instance.
@@ -91,7 +87,7 @@ Once you've set up ZooKeeper, initialized cluster metadata, and spun up BookKeep
 
 Brokers can be configured using the [`conf/broker.conf`](../../reference/Configuration#broker) configuration file.
 
-The most important element of broker configuration is ensuring that each broker is aware of its local ZooKeeper quorum as well as the global ZooKeeper quorum. Make sure that you set the [`zookeeperServers`](../../reference/Configuration#broker-zookeeperServers) parameter to reflect the local quorum and the [`globalZookeeperServers`](../../reference/Configuration#broker-globalZookeeperServers) parameter to reflect the global quorum (although you'll need to specify only those global ZooKeeper servers located in the same cluster).
+The most important element of broker configuration is ensuring that each broker is aware of its local ZooKeeper quorum as well as the configuration store quorum. Make sure that you set the [`zookeeperServers`](../../reference/Configuration#broker-zookeeperServers) parameter to reflect the local quorum and the [`configurationStoreServers`](../../reference/Configuration#broker-configurationStoreServers) parameter to reflect the configuration store quorum (although you'll need to specify only those ZooKeeper servers located in the same cluster).
 
 You also need to specify the name of the {% popover cluster %} to which the broker belongs using the [`clusterName`](../../reference/Configuration#broker-clusterName) parameter.
 
@@ -101,8 +97,8 @@ Here's an example configuration:
 # Local ZooKeeper servers
 zookeeperServers=zk1.us-west.example.com:2181,zk2.us-west.example.com:2181,zk3.us-west.example.com:2181
 
-# Global Zookeeper quorum connection string.
-globalZookeeperServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
+# Configuration store quorum connection string.
+configurationStoreServers=zk1.us-west.example.com:2184,zk2.us-west.example.com:2184,zk3.us-west.example.com:2184
 
 clusterName=us-west
 ```
@@ -141,26 +137,24 @@ serviceUrl=http://pulsar.us-west.example.com:8080/
 
 ## Provisioning new tenants
 
-Pulsar was built as a fundamentally {% popover multi-tenant %} system. New tenants can be provisioned as Pulsar {% popover properties %}. Properties can be
+Pulsar was built as a fundamentally {% popover multi-tenant %} system.
 
-To allow a new tenant to use the system, we need to create a new {% popover property %}. You can create a new property using the [`pulsar-admin`](../../reference/CliTools#pulsar-admin-properties-create) CLI tool:
+To allow a new {% popover tenant %} to use the system, we need to create a new one. You can create a new tenant using the [`pulsar-admin`](../../reference/CliTools#pulsar-admin-tenants-create) CLI tool:
 
 ```shell
-$ bin/pulsar-admin properties create test-prop \
+$ bin/pulsar-admin tenants create test-tenant \
   --allowed-clusters us-west \
   --admin-roles test-admin-role
 ```
 
-This will allow users who identify with role `test-admin-role` to administer the configuration for the property `test` which will only be allowed to use the cluster `us-west`. From now on, this tenant will be able to self-manage its resources.
+This will allow users who identify with role `test-admin-role` to administer the configuration for the tenant `test-tenant` which will only be allowed to use the cluster `us-west`. From now on, this tenant will be able to self-manage its resources.
 
-Once a tenant has been created, you will need to create {% popover namespaces %} for topics within that property.
+Once a tenant has been created, you will need to create {% popover namespaces %} for topics within that tenant.
 
-The first step is to create a namespace. A namespace is an administrative unit
-that can contain many topic. Common practice is to create a namespace for each
-different use case from a single tenant.
+The first step is to create a namespace. A namespace is an administrative unit that can contain many topics. A common practice is to create a namespace for each different use case from a single tenant.
 
 ```shell
-$ bin/pulsar-admin namespaces create test-prop/us-west/ns1
+$ bin/pulsar-admin namespaces create test-tenant/ns1
 ```
 
 ##### Testing producer and consumer
@@ -173,24 +167,24 @@ created the first time a producer or a consumer tries to use them.
 
 The topic name in this case could be:
 
-{% include topic.html p="test-prop" c="us-west" n="ns1" t="my-topic" %}
+{% include topic.html ten="test-tenant" n="ns1" t="my-topic" %}
 
 Start a consumer that will create a subscription on the topic and will wait
 for messages:
 
 ```shell
-$ bin/pulsar-perf consume persistent://test-prop/us-west/ns1/my-topic
+$ bin/pulsar-perf consume persistent://test-tenant/ns1/my-topic
 ```
 
 Start a producer that publishes messages at a fixed rate and report stats every
 10 seconds:
 
 ```shell
-$ bin/pulsar-perf produce persistent://test-prop/us-west/ns1/my-topic
+$ bin/pulsar-perf produce persistent://test-tenant/ns1/my-topic
 ```
 
 To report the topic stats:
 
 ```shell
-$ bin/pulsar-admin persistent stats persistent://test-prop/us-west/ns1/my-topic
+$ bin/pulsar-admin persistent stats persistent://test-tenant/ns1/my-topic
 ```
diff --git a/site/docs/latest/getting-started/ConceptsAndArchitecture.md b/site/docs/latest/getting-started/ConceptsAndArchitecture.md
index cd92a95b32..817b3eb6f0 100644
--- a/site/docs/latest/getting-started/ConceptsAndArchitecture.md
+++ b/site/docs/latest/getting-started/ConceptsAndArchitecture.md
@@ -95,33 +95,29 @@ Client libraries can provide their own listener implementations for consumers. T
 
 ### Topics
 
-As in other pub-sub systems, topics in Pulsar are named channels for transmitting messages from producers to consumers. Topic names are URLs that have a well-defined structure:
+As in other pub-sub systems, topics in Pulsar are named channels for transmitting messages from {% popover producers %} to {% popover consumers %}. Topic names are URLs that have a well-defined structure:
 
-{% include topic.html p="property" c="cluster" n="namespace" t="topic" %}
+{% include topic.html type="{persistent|non-persistent}" ten="tenant" n="namespace" t="topic" %}
 
-| Topic name component | Description                                                                                                                                                                                                                                  |
-|:---------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `persistent`         | It identifies type of topic. Pulsar supports two kind of topics: persistent and non-persistent. In persistent topic, all messages are durably [persisted](#persistent-storage) on disk (that means on multiple disks unless the {% popover broker %} is {% popover standalone %}), whereas [non-persistent](#non-persistent-topics) topic does not persist message into storage disk. |
-| `property`           | The topic's {% popover tenant %} within the instance. Tenants are essential to {% popover multi-tenancy %} in Pulsar and can be spread across clusters.                                                                                      |
-| `cluster`            | Where the topic is located. Typically there will be one {% popover cluster %} for each geographical region or data center.                                                                                                                   |
-| `namespace`          | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespace) level. Each property (tenant) can have multiple namespaces.                              |
-| `topic`              | The final part of the name. Topic names are freeform and have no special meaning in a Pulsar instance.                                                                                                                                       |
+Topic name component | Description
+:--------------------|:-----------
+`persistent` / `non-persistent` | This identifies the type of topic. Pulsar supports two kind of topics: [persistent](#persistent-storage) and [non-persistent](#non-persistent-topics) (persistent is the default, so if you don't specify a type the topic will be persistent). With persistent topics, all messages are durably [persisted](#persistent-storage) on disk (that means on multiple disks unless the {% popover broker %} is {% popover standalone %}), whereas data for [non-persistent](#non-persistent-topics) topics isn't persisted to storage disks.
+`tenant`             | The topic's {% popover tenant %} within the instance. Tenants are essential to {% popover multi-tenancy %} in Pulsar and can be spread across clusters.
+`namespace`          | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Most topic configuration is performed at the [namespace](#namespace) level. Each tenant can have multiple namespaces.
+`topic`              | The final part of the name. Topic names are freeform and have no special meaning in a Pulsar instance.
 
 {% include admonition.html type="success" title="No need to explicitly create new topics"
 content="You don't need to explicitly create topics in Pulsar. If a client attempts to write or receive messages to/from a topic that does not yet exist, Pulsar will automatically create that topic under the [namespace](#namespace) provided in the [topic name](#topics)." %}
 
-### Namespace
-
-A namespace is a logical nomenclature within a property. A property can create multiple namespaces via [admin API](../../admin-api/namespaces#create). For instance, a property with different applications can create a separate namespace for each application. A namespace allows the application to create and manage a hierarchy of topics. 
-For e.g.  `my-property/my-cluster/my-property-app1` is a namespace for the application  `my-property-app1` in cluster `my-cluster` for `my-property`. 
-Application can create any number of [topics](#topics) under the namespace.
+### Namespaces
 
+A namespace is a logical nomenclature within a tenant. A tenant can create multiple namespaces via the [admin API](../../admin-api/namespaces#create). For instance, a tenant with different applications can create a separate namespace for each application. A namespace allows the application to create and manage a hierarchy of topics. The namespace `my-tenant/app1` is a namespace for the application `app1` for `my-tenant`. You can create any number of [topics](#topics) under the namespace.
 
 ### Subscription modes
 
 A subscription is a named configuration rule that determines how messages are delivered to {% popover consumers %}. There are three available subscription modes in Pulsar: [exclusive](#exclusive), [shared](#shared), and [failover](#failover). These modes are illustrated in the figure below.
 
-![Subscription Modes](/img/pulsar_subscriptions.jpg)
+{% include figure.html src="/img/pulsar-subscription-modes.png" alt="Subscription modes" width="80" %}
 
 #### Exclusive
 
@@ -129,7 +125,9 @@ In *exclusive* mode, only a single consumer is allowed to attach to the subscrip
 
 In the diagram above, only **Consumer-A** is allowed to consume messages.
 
-Exclusive mode is the default subscription mode.  
+{% include admonition.html type="info" content="Exclusive mode is the default subscription mode." %}
+
+{% include figure.html src="/img/exclusive-subscriptions.png" alt="Exclusive subscriptions" width="80" %}
 
 #### Shared
 
@@ -139,6 +137,8 @@ In the diagram above, **Consumer-B-1** and **Consumer-B-2** are able to subscrib
 
 {% include message.html id="shared_mode_limitations" %}
 
+{% include figure.html src="/img/shared-subscriptions.png" alt="Shared subscriptions" width="80" %}
+
 #### Failover
 
 In *failover* mode, multiple consumers can attach to the same subscription. The consumers will be lexically sorted by the consumer's name and the first consumer will initially be the only one receiving messages. This consumer is called the *master consumer*.
@@ -147,6 +147,8 @@ When the master consumer disconnects, all (non-acked and subsequent) messages wi
 
 In the diagram above, Consumer-C-1 is the master consumer while Consumer-C-2 would be the next in line to receive messages if Consumer-C-2 disconnected.
 
+{% include figure.html src="/img/failover-subscriptions.png" alt="Failover subscriptions" width="80" %}
+
 ### Multi-topic subscriptions
 
 When a {% popover consumer %} subscribes to a Pulsar {% popover topic %}, by default it subscribes to one specific topic, such as `persistent://sample/ns1/standalone/my-topic`. As of Pulsar version 1.23.0-incubating, however, Pulsar consumers can simultaneously subscribe to multiple topics. You can define a list of topics in two ways:
@@ -244,15 +246,15 @@ At the highest level, a Pulsar {% popover instance %} is composed of one or more
 
 In a Pulsar cluster:
 
-* One or more {% popover brokers %} handles and load balances incoming messages from {% popover producers %}, dispatches messages to {% popover consumers %}, communicates with {% popover global ZooKeeper %} to handle various coordination tasks, stores messages in {% popover BookKeeper %} instances (aka {% popover bookies %}), relies on a cluster-specific {% popover ZooKeeper %} cluster for certain tasks, and more.
+* One or more {% popover brokers %} handles and load balances incoming messages from {% popover producers %}, dispatches messages to {% popover consumers %}, communicates with the Pulsar {% popover configuration store %} to handle various coordination tasks, stores messages in {% popover BookKeeper %} instances (aka {% popover bookies %}), relies on a cluster-specific {% popover ZooKeeper %} cluster for certain tasks, and more.
 * A {% popover BookKeeper %} cluster consisting of one or more {% popover bookies %} handles [persistent storage](#persistent-storage) of messages.
 * A {% popover ZooKeeper %} cluster specific to that cluster handles
 
 The diagram below provides an illustration of a Pulsar cluster:
 
-![Architecture Diagram](/img/pulsar_system_architecture.png)
+{% include figure.html src="/img/pulsar-system-architecture.png" alt="Pulsar architecture diagram" width="90" %}
 
-At the broader {% popover instance %} level, an instance-wide ZooKeeper cluster called {% popover global ZooKeeper %} handles coordination tasks involving multiple clusters, for example [geo-replication](#replication).
+At the broader {% popover instance %} level, an instance-wide ZooKeeper cluster called the {% popover configuration store %} handles coordination tasks involving multiple clusters, for example [geo-replication](#replication).
 
 ## Brokers
 
@@ -279,23 +281,13 @@ Clusters can replicate amongst themselves using [geo-replication](#geo-replicati
 
 {% include admonition.html type="info" content="For a guide to managing Pulsar clusters, see the [Clusters and brokers](../../admin/ClustersBrokers#managing-clusters) guide." %}
 
-### Global cluster
-
-In any Pulsar {% popover instance %}, there is an instance-wide cluster called `global` that you can use to mange non-cluster-specific namespaces and topics. The `global` cluster is created for you automatically when you [initialize metadata](../../admin/ClustersBrokers#initialize-cluster-metadata) for the first cluster in your instance.
-
-Global topic names have this basic structure (note the `global` cluster):
-
-{% include topic.html p="my-property" c="global" n="my-namespace" t="my-topic" %}
-
 ## Metadata store
 
 Pulsar uses [Apache Zookeeper](https://zookeeper.apache.org/) for metadata storage, cluster configuration, and coordination. In a Pulsar instance:
 
-* A {% popover global ZooKeeper %} quorum stores configuration for {% popover properties %}, {% popover namespaces %}, and other entities that need to be globally consistent.
+* A {% popover configuration store %} quorum stores configuration for {% popover tenants %}, {% popover namespaces %}, and other entities that need to be globally consistent.
 * Each cluster has its own local ZooKeeper ensemble that stores {% popover cluster %}-specific configuration and coordination such as ownership metadata, broker load reports, BookKeeper {% popover ledger %} metadata, and more.
 
-When creating a [new cluster](../../admin/ClustersBrokers#initialize-cluster-metadata)
-
 ## Persistent storage
 
 Pulsar provides guaranteed message delivery for applications. If a message successfully reaches a Pulsar {% popover broker %}, it will be delivered to its intended target.
@@ -317,13 +309,13 @@ In addition to message data, *cursors* are also persistently stored in BookKeepe
 
 At the moment, Pulsar only supports persistent message storage. This accounts for the `persistent` in all {% popover topic %} names. Here's an example:
 
-{% include topic.html p="my-property" c="global" n="my-namespace" t="my-topic" %}
+{% include topic.html ten="my-property" n="my-namespace" t="my-topic" %}
 
 {% include admonition.html type="success" content='Pulsar also supports ephemeral ([non-persistent](#non-persistent-topics)) message storage.' %}
 
 You can see an illustration of how {% popover brokers %} and {% popover bookies %} interact in the diagram below:
 
-![Brokers and bookies](/img/broker-bookie.png)
+{% include figure.html src="/img/broker-bookie.png" alt="Brokers and bookies" width="80" %}
 
 ### Ledgers
 
@@ -370,7 +362,7 @@ Pulsar has two features, however, that enable you to override this default behav
 
 The diagram below illustrates both concepts:
 
-{% img /img/retention-expiry.png 80 %}
+{% include figure.html src="/img/retention-expiry.png" alt="Message retention and expiry" width="80" %}
 
 With message retention, shown at the top, a <span style="color: #89b557;">retention policy</span> applied to all topics in a {% popover namespace %} dictates that some messages are durably stored in Pulsar even though they've already been acknowledged. Acknowledged messages that are not covered by the retention policy are <span style="color: #bb3b3e;">deleted</span>. Without a retention policy, *all* of the <span style="color: #19967d;">acknowledged messages</span> would be deleted.
 
@@ -390,7 +382,7 @@ Message **duplication** occurs when a message is [persisted](#persistent-storage
 
 The following diagram illustrates what happens when message deduplication is disabled vs. enabled:
 
-{% img /img/message-deduplication.png 75 %}
+{% include figure.html src="/img/message-deduplication.png" alt="Pulsar message deduplication" width="75" %}
 
 Message deduplication is disabled in the scenario shown at the top. Here, a producer publishes message 1 on a topic; the message reaches a Pulsar {% popover broker %} and is [persisted](#persistent-storage) to BookKeeper. The producer then sends message 1 again (in this case due to some retry logic), and the message is received by the broker and stored in BookKeeper again, which means that duplication has occurred.
 
@@ -412,21 +404,21 @@ More in-depth information can be found in [this post](https://blog.streaml.io/pu
 
 ## Multi-tenancy
 
-Pulsar was created from the ground up as a {% popover multi-tenant %} system. To support multi-tenancy, Pulsar has a concept of {% popover properties %}. Properties can be spread across {% popover clusters %} and can each have their own [authentication and authorization](../../admin/Authz) scheme applied to them. They are also the administrative unit at which [storage quotas](TODO), [message TTL](../../cookbooks/RetentionExpiry#time-to-live-ttl), and isolation policies can be managed.
+Pulsar was created from the ground up as a {% popover multi-tenant %} system. To support multi-tenancy, Pulsar has a concept of {% popover tenants %}. Tenants can be spread across {% popover clusters %} and can each have their own [authentication and authorization](../../admin/Authz) scheme applied to them. They are also the administrative unit at which storage quotas, [message TTL](../../cookbooks/RetentionExpiry#time-to-live-ttl), and isolation policies can be managed.
 
 The multi-tenant nature of Pulsar is reflected mostly visibly in topic URLs, which have this structure:
 
-{% include topic.html p="property" c="cluster" n="namespace" t="topic" %}
+{% include topic.html ten="tenant" n="namespace" t="topic" %}
 
-As you can see, the property is the most basic unit of categorization for topics (and even more fundamental than the {% popover cluster %}).
+As you can see, the tenant is the most basic unit of categorization for topics (more fundamental than the {% popover namespace %} and topic name).
 
-### Properties and namespaces
+### Tenants and namespaces
 
-{% include explanations/properties-namespaces.md %}
+{% include explanations/tenants-namespaces.md %}
 
 ## Authentication and Authorization
 
-Pulsar supports a pluggable [authentication](../../admin/Authz) mechanism which can be configured at broker and it also supports authorization to identify client and its access rights on topics and properties.
+Pulsar supports a pluggable [authentication](../../admin/Authz) mechanism that can be configured at the broker, and it also supports authorization to identify clients and their access rights on topics and tenants.
 
 ## Client interface
 
@@ -455,12 +447,12 @@ The **Pulsar proxy** provides a solution to this problem by acting as a single g
 
 {% include admonition.html type="success" content="For the sake of performance and fault tolerance, you can run as many instances of the Pulsar proxy as you'd like." %}
 
-Architecturally, the Pulsar proxy gets all the information it requires from ZooKeeper. When starting the proxy on a machine, you only need to provide ZooKeeper connection strings for the cluster-specific and {% popover global ZooKeeper %} clusters. Here's an example:
+Architecturally, the Pulsar proxy gets all the information it requires from ZooKeeper. When starting the proxy on a machine, you only need to provide ZooKeeper connection strings for the cluster-specific and instance-wide {% popover configuration store %} clusters. Here's an example:
 
 ```bash
 $ bin/pulsar proxy \
   --zookeeper-servers zk-0,zk-1,zk-2 \
-  --global-zookeeper-servers zk-0,zk-1,zk-2
+  --configuration-store-servers zk-0,zk-1,zk-2
 ```
 
 {% include admonition.html type="info" title="Pulsar proxy docs" content='
@@ -478,6 +470,18 @@ Some important things to know about the Pulsar proxy:
 
 You can use your own service discovery system if you'd like. If you use your own system, there is just one requirement: when a client performs an HTTP request to an endpoint, such as `http://pulsar.us-west.example.com:8080`, the client needs to be redirected to *some* active broker in the desired {% popover cluster %}, whether via DNS, an HTTP or IP redirect, or some other means.
 
+The diagram below illustrates Pulsar service discovery:
+
+{% include figure.html src="/img/pulsar-service-discovery.png" width="50" %}
+
+In this diagram, the Pulsar cluster is addressable via a single DNS name: `pulsar-cluster.acme.com`. A [Python client](../../clients/Python), for example, could access this Pulsar cluster like this:
+
+```python
+from pulsar import Client
+
+client = Client('pulsar://pulsar-cluster.acme.com:6650')
+```
+
 ## Reader interface
 
 In Pulsar, the "standard" [consumer interface](#consumers) involves using {% popover consumers %} to listen on {% popover topics %}, process incoming messages, and finally {% popover acknowledge %} those messages when they've been processed. Whenever a consumer connects to a topic, it automatically begins reading from the earliest un-acked message onward because the topic's cursor is automatically managed by Pulsar.
@@ -490,7 +494,7 @@ The **reader interface** for Pulsar enables applications to manually manage curs
 
 The reader interface is helpful for use cases like using Pulsar to provide [effectively-once](https://blog.streaml.io/exactly-once/) processing semantics for a stream processing system. For this use case, it's essential that the stream processing system be able to "rewind" topics to a specific message and begin reading there. The reader interface provides Pulsar clients with the low-level abstraction necessary to "manually position" themselves within a topic.
 
-<img src="/img/pulsar-reader-consumer-interfaces.png" alt="The Pulsar consumer and reader interfaces" width="80%">
+{% include figure.html src="/img/pulsar-reader-consumer-interfaces.png" alt="The Pulsar consumer and reader interfaces" width="80" %}
 
 {% include admonition.html type="warning" title="Non-partitioned topics only"
 content="The reader interface for Pulsar cannot currently be used with [partitioned topics](#partitioned-topics)." %}
diff --git a/site/docs/latest/getting-started/LocalCluster.md b/site/docs/latest/getting-started/LocalCluster.md
index 7dfdbc577e..ecd1aca061 100644
--- a/site/docs/latest/getting-started/LocalCluster.md
+++ b/site/docs/latest/getting-started/LocalCluster.md
@@ -28,9 +28,7 @@ next: ../ConceptsAndArchitecture
 
 -->
 
-For the purposes of local development and testing, you can run Pulsar in {% popover standalone %} mode on your own machine.
-
-Standalone mode includes a Pulsar {% popover broker %} as well as the necessary {% popover ZooKeeper %} and {% popover BookKeeper %} components running inside of a single Java Virtual Machine (JVM) process.
+For the purposes of local development and testing, you can run Pulsar in {% popover standalone %} mode on your own machine. Standalone mode includes a Pulsar {% popover broker %} as well as the necessary {% popover ZooKeeper %} and {% popover BookKeeper %} components running inside of a single Java Virtual Machine (JVM) process.
 
 {% include admonition.html type="info" title='Pulsar in production?' content="
 If you're looking to run a full production Pulsar installation, see the [Deploying a Pulsar instance](../../deployment/InstanceSetup) guide." %}
@@ -61,15 +59,14 @@ When you start a local standalone cluster, Pulsar will automatically create a `s
 Pulsar provides a CLI tool called [`pulsar-client`](../../reference/CliTools#pulsar-client) that enables you to do things like send messages to a Pulsar {% popover topic %} in a running cluster. This command will send a simple message saying `hello-pulsar` to the `persistent://sample/standalone/ns1/my-topic` topic:
 
 ```bash
-$ bin/pulsar-client produce \
-  persistent://sample/standalone/ns1/my-topic \
-  -m 'hello-pulsar'
+$ bin/pulsar-client produce my-topic \
+  --messages "hello-pulsar"
 ```
 
 If the message has been successfully published to the topic, you should see a confirmation like this in the `pulsar-client` logs:
 
 ```
-2017-06-01 18:18:57,094 - INFO  - [main:CmdProduce@189] - 1 messages successfully produced
+13:09:39.356 [main] INFO  org.apache.pulsar.client.cli.PulsarClientTool - 1 messages successfully produced
 ```
 
 {% include admonition.html type="success" title="No need to explicitly create new topics"
@@ -86,11 +83,9 @@ Here's an example producer for a Pulsar {% popover topic %} using the [Java](../
 
 ```java
 String localClusterUrl = "pulsar://localhost:6650";
-String namespace = "sample/standalone/ns1"; // This namespace is created automatically
-String topic = String.format("persistent://%s/my-topic", namespace);
 
 PulsarClient client = PulsarClient.create(localClusterUrl);
-Producer producer = client.createProducer(topic);
+Producer producer = client.createProducer("my-topic");
 ```
 
 Here's an example [Python](../../clients/Python) producer:
@@ -98,10 +93,8 @@ Here's an example [Python](../../clients/Python) producer:
 ```python
 import pulsar
 
-TOPIC = 'persistent://sample/standalone/ns/my-topic'
-
 client = pulsar.Client('pulsar://localhost:6650')
-producer = client.create_producer(TOPIC)
+producer = client.create_producer('my-topic')
 ```
 
 Finally, here's an example [C++](../../clients/Cpp) producer:
@@ -109,7 +102,7 @@ Finally, here's an example [C++](../../clients/Cpp) producer:
 ```cpp
 Client client("pulsar://localhost:6650");
 Producer producer;
-Result result = client.createProducer("persistent://sample/standalone/ns1/my-topic", producer);
+Result result = client.createProducer("my-topic", producer);
 if (result != ResultOk) {
     LOG_ERROR("Error creating producer: " << result);
     return -1;
diff --git a/site/docs/latest/getting-started/Pulsar-2.0.md b/site/docs/latest/getting-started/Pulsar-2.0.md
new file mode 100644
index 0000000000..9ee8c30880
--- /dev/null
+++ b/site/docs/latest/getting-started/Pulsar-2.0.md
@@ -0,0 +1,62 @@
+---
+title: Pulsar 2.0
+hide_pulsar2_notification: true
+new: true
+tags: ["2.0", "tenants", "clients"]
+---
+
+Pulsar 2.0 is a major new release for Pulsar that brings some bold changes to the platform, including [simplified topic names](#topic-names).
+
+## New features in Pulsar 2.0
+
+Feature | Description
+:-------|:-----------
+[Pulsar Functions](../../functions/overview) | A lightweight compute option for Pulsar
+
+## Major changes
+
+There are a few major changes that you should be aware of, as they may significantly impact your day-to-day usage.
+
+### Properties versus tenants {#tenants}
+
+Previously, Pulsar had a concept of {% popover properties %}. A property is essentially the exact same thing as a {% popover tenant %}, so the "property" terminology has been removed in version 2.0. The [`pulsar-admin properties`](../../CliTools#pulsar-admin) command-line interface, for example, has been replaced with the [`pulsar-admin tenants`](../../CliTools#pulsar-admin-tenants) interface. In some cases the properties terminology is still used but is now considered deprecated and will be removed entirely in a future release.
+
+### Topic names
+
+Prior to version 2.0, *all* Pulsar topics had the following form:
+
+{% include topic.html type="{persistent|non-persistent}" ten="property" n="namespace" c="cluster" t="topic" %}
+
+Three important changes have been made in Pulsar 2.0:
+
+* There is no longer a [cluster component](#no-cluster)
+* Properties have been [renamed to tenants](#tenants)
+* You can use a [flexible](#flexible-topic-naming) naming system to shorten many topic names
+
+#### No cluster component {#no-cluster}
+
+The {% popover cluster %} component has been removed from topic names. Thus, all topic names now have the following form:
+
+{% include topic.html type="{persistent|non-persistent}" ten="tenant" n="namespace" t="topic" %}
+
+{% include admonition.html type="success" content="Existing topics that use the legacy name format will continue to work without any change, and there are no plans to change that." %}
+
+#### Flexible topic naming
+
+All topic names in Pulsar 2.0 internally have the form shown [above](#no-cluster) but you can now use shorthand names in many cases (for the sake of simplicity). The flexible naming system stems from the fact that there is now a default topic type, tenant, and namespace:
+
+Topic aspect | Default
+:------------|:-------
+topic type | `persistent`
+tenant | `public`
+namespace | `default`
+
+The table below shows some example topic name translations that use implicit defaults:
+
+Input topic name | Translated topic name
+:----------------|:---------------------
+`my-topic` | `persistent://public/default/my-topic`
+`my-tenant/my-namespace/my-topic` | `persistent://my-tenant/my-namespace/my-topic`
+
+{% include admonition.html type="warning" id="non-persistent-topic-names" content="
+For [non-persistent topics](../ConceptsAndArchitecture#non-persistent-topics) you'll need to continue to specify the entire topic name, as the default-based rules for persistent topic names don't apply. Thus you cannot use a shorthand name like `non-persistent://my-topic` and would need to use `non-persistent://public/default/my-topic` instead." %}
\ No newline at end of file
diff --git a/site/docs/latest/reference/Configuration.md b/site/docs/latest/reference/Configuration.md
index 91da121344..7fcc58863a 100644
--- a/site/docs/latest/reference/Configuration.md
+++ b/site/docs/latest/reference/Configuration.md
@@ -29,7 +29,7 @@ Pulsar configuration can be managed either via a series of configuration files c
 * [Broker](#broker)
 * [Client](#client)
 * [Service discovery](#service-discovery)
-* [Global ZooKeeper](#global-zookeeper)
+* [Configuration store](#configuration-store)
 * [Log4j](#log4j)
 * [Log4j shell](#log4j-shell)
 * [Standalone](#standalone)
@@ -58,9 +58,9 @@ The [`pulsar-client`](../CliTools#pulsar-client) CLI tool can be used to publish
 
 {% include config.html id="discovery" %}
 
-## Global ZooKeeper
+## Configuration store
 
-{% include config.html id="global-zookeeper" %}
+{% include config.html id="configuration-store" %}
 
 ## Log4j
 
diff --git a/site/img/exclusive-subscriptions.png b/site/img/exclusive-subscriptions.png
new file mode 100644
index 0000000000..3d5867b4ed
Binary files /dev/null and b/site/img/exclusive-subscriptions.png differ
diff --git a/site/img/partitioning.png b/site/img/partitioning.png
new file mode 100644
index 0000000000..b0494522b3
Binary files /dev/null and b/site/img/partitioning.png differ
diff --git a/site/img/pulsar-service-discovery.png b/site/img/pulsar-service-discovery.png
new file mode 100644
index 0000000000..4dc3224617
Binary files /dev/null and b/site/img/pulsar-service-discovery.png differ
diff --git a/site/img/pulsar-subscription-modes.png b/site/img/pulsar-subscription-modes.png
new file mode 100644
index 0000000000..e8e618b80a
Binary files /dev/null and b/site/img/pulsar-subscription-modes.png differ
diff --git a/site/img/pulsar_system_architecture.png b/site/img/pulsar-system-architecture.png
similarity index 100%
rename from site/img/pulsar_system_architecture.png
rename to site/img/pulsar-system-architecture.png
diff --git a/site/img/shared-subscriptions.png b/site/img/shared-subscriptions.png
new file mode 100644
index 0000000000..13c0dae037
Binary files /dev/null and b/site/img/shared-subscriptions.png differ


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services