Posted to commits@pulsar.apache.org by GitBox <gi...@apache.org> on 2018/05/31 00:40:55 UTC

[GitHub] Audace closed pull request #1872: Update configuration files from 2.0.0

URL: https://github.com/apache/incubator-pulsar/pull/1872

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/deployment/terraform-ansible/deploy-pulsar.yaml b/deployment/terraform-ansible/deploy-pulsar.yaml
index bec552ae0b..78dc3a17d6 100644
--- a/deployment/terraform-ansible/deploy-pulsar.yaml
+++ b/deployment/terraform-ansible/deploy-pulsar.yaml
@@ -56,7 +56,7 @@
         zookeeper_servers: "{{ groups['zookeeper']|map('extract', hostvars, ['ansible_default_ipv4', 'address'])|map('regex_replace', '(.*)', '\\1:2181') | join(',') }}"
         service_url: "pulsar://{{ hostvars[groups['pulsar'][0]].public_ip }}:6650/"
         http_url: "http://{{ hostvars[groups['pulsar'][0]].public_ip }}:8080/"
-        pulsar_version: "1.22.0-incubating"
+        pulsar_version: "2.0.0-rc1-incubating"
 
     - name: Download Pulsar binary package
       unarchive:
@@ -80,7 +80,7 @@
     - set_fact:
         zid: "{{ groups['zookeeper'].index(inventory_hostname) }}"
         max_heap_memory: "512m"
-        max_direct_memory: "512m"
+        max_direct_memory: "1g"
         cluster_name: "local"
     - name: Create ZooKeeper data directory
       file:
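
For readers not fluent in Jinja2, the `zookeeper_servers` expression in the playbook hunk above
takes each ZooKeeper host's private IPv4 address and joins the results into a comma-separated
"host:2181" list. A minimal Python sketch of the same transformation, with hypothetical
addresses (nothing below is part of the PR):

    # Illustrative only: what the Jinja2 filter chain for `zookeeper_servers`
    # evaluates to, reproduced outside of Ansible.
    zookeeper_ips = ["10.0.0.11", "10.0.0.12", "10.0.0.13"]  # hypothetical addresses

    zookeeper_servers = ",".join(f"{ip}:2181" for ip in zookeeper_ips)
    print(zookeeper_servers)  # -> 10.0.0.11:2181,10.0.0.12:2181,10.0.0.13:2181
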
diff --git a/deployment/terraform-ansible/templates/bookkeeper.conf b/deployment/terraform-ansible/templates/bookkeeper.conf
index 7e97989cff..942dcffbbe 100644
--- a/deployment/terraform-ansible/templates/bookkeeper.conf
+++ b/deployment/terraform-ansible/templates/bookkeeper.conf
@@ -17,28 +17,43 @@
 # under the License.
 #
 
+## Bookie settings
 
-zkServers={{ zookeeper_servers }}
+#############################################################################
+## Server parameters
+#############################################################################
 
-advertisedAddress={{ hostvars[inventory_hostname].private_ip }}
+# Port that bookie server listens on
+bookiePort=3181
 
-# Use multiple journals to better exploit SSD throughput
-journalDirectories=/mnt/journal/1,/mnt/journal/2,/mnt/journal/3,/mnt/journal/4
-ledgerDirectories=/mnt/storage
+# Directories where BookKeeper outputs its write ahead log.
+# Multiple directories can be defined to store write ahead logs, separated by ','.
+# For example:
+#   journalDirectories=/tmp/bk-journal1,/tmp/bk-journal2
+# If journalDirectories is set, bookies will skip journalDirectory and use
+# the directories given in this setting.
+# journalDirectories=/tmp/bk-journal
 
-dbStorage_writeCacheMaxSizeMb=4096
-dbStorage_readAheadCacheMaxSizeMb=4096
-dbStorage_rocksDB_blockCacheSize=4294967296
+# Directory Bookkeeper outputs its write ahead log
+# @deprecated since 4.5.0. journalDirectories is preferred over journalDirectory.
+journalDirectory=data/bookkeeper/journal
 
+# Configure the bookie to allow/disallow multiple ledger/index/journal directories
+# in the same filesystem disk partition
+# allowMultipleDirsUnderSameDiskPartition=false
 
-## Regular Bookie settings
-
-# Port that bookie server listen on
-bookiePort=3181
+# Minimum safe usable size (in bytes) to be available in the index directory for the bookie to
+# create index files while replaying the journal when the bookie starts in read-only mode
+minUsableSizeForIndexFileCreation=1073741824
 
 # Set the network interface that the bookie should listen on.
 # If not set, the bookie will listen on all interfaces.
-#listeningInterface=eth0
+# listeningInterface=eth0
+
+# Configure a specific hostname or IP address that the bookie should use to advertise itself to
+# clients. If not set, bookie will advertise its own IP address or hostname, depending on the
+# listeningInterface and `useHostNameAsBookieID` settings.
+advertisedAddress={{ hostvars[inventory_hostname].public_ip }}
 
 # Whether the bookie is allowed to use a loopback interface as its primary
 # interface(i.e. the interface it uses to establish its identity)?
@@ -53,101 +68,231 @@ bookiePort=3181
 # set the listening interface.
 allowLoopback=false
 
-# Configure a specific hostname or IP address that the bookie should use to advertise itself to
-# clients. If not set, bookie will advertised its own IP address or hostname, depending on the
-# listeningInterface and `seHostNameAsBookieID settings.
-# advertisedAddress=
+# Interval to watch whether bookie is dead or not, in milliseconds
+bookieDeathWatchInterval=1000
 
-# Directory Bookkeeper outputs its write ahead log
-# journalDirectory=data/bookkeeper/journal
+# When entryLogPerLedgerEnabled is enabled, a checkpoint doesn't happen
+# when a new active entrylog is created / the previous one is rolled over.
+# Instead, the SyncThread checkpoints periodically with a 'flushInterval' delay
+# (in milliseconds) in between executions. A checkpoint flushes both ledger
+# entryLogs and ledger index pages to disk.
+# Flushing entrylog and index files will introduce much random disk I/O.
+# If the journal dir and ledger dirs are each on different devices,
+# flushing will not affect performance. But if the journal dir
+# and ledger dirs are on the same device, performance degrades significantly
+# with too frequent flushing. You can consider increasing the flush interval
+# to get better performance, but bookie server restart after a failure
+# will then take more time.
+# This config is used only when entryLogPerLedgerEnabled is enabled.
+flushInterval=60000
 
-# Directory Bookkeeper outputs ledger snapshots
-# could define multi directories to store snapshots, separated by ','
-# For example:
-# ledgerDirectories=/tmp/bk1-data,/tmp/bk2-data
-#
-# Ideally ledger dirs and journal dir are each in a differet device,
-# which reduce the contention between random i/o and sequential write.
-# It is possible to run with a single disk, but performance will be significantly lower.
-# ledgerDirectories=data/bookkeeper/ledgers
-# Directories to store index files. If not specified, will use ledgerDirectories to store.
-# indexDirectories=data/bookkeeper/ledgers
+# Allow the expansion of bookie storage capacity. Newly added ledger
+# and index dirs must be empty.
+# allowStorageExpansion=false
 
-# Ledger Manager Class
-# What kind of ledger manager is used to manage how ledgers are stored, managed
-# and garbage collected. Try to read 'BookKeeper Internals' for detail info.
-ledgerManagerType=hierarchical
+# Whether the bookie should use its hostname to register with the
+# co-ordination service (eg: ZooKeeper service).
+# When false, the bookie will use its IP address for the registration.
+# Defaults to false.
+useHostNameAsBookieID=false
 
-# Root zookeeper path to store ledger metadata
-# This parameter is used by zookeeper-based ledger manager as a root znode to
-# store all ledgers.
-zkLedgersRootPath=/ledgers
+# Whether the bookie is allowed to use an ephemeral port (port 0) as its
+# server port. By default, an ephemeral port is not allowed.
+# Using an ephemeral port as the service port usually indicates a configuration
+# error. However, in unit tests, using an ephemeral port will address port
+# conflict problems and allow running tests in parallel.
+# allowEphemeralPorts=false
 
-# Ledger storage implementation class
-ledgerStorageClass=org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage
+# Whether to allow the bookie to listen for BookKeeper clients executed on the local JVM.
+# enableLocalTransport=false
 
-# Enable/Disable entry logger preallocation
-entryLogFilePreallocationEnabled=true
+# Whether to allow the bookie to disable binding on network interfaces;
+# the bookie will then be available only to BookKeeper clients executed on the local JVM.
+# disableServerSocketBind=false
 
-# Max file size of entry logger, in bytes
-# A new entry log file will be created when the old one reaches the file size limitation
-logSizeLimit=2147483648
+# The number of bytes we should use as chunk allocation for
+# org.apache.bookkeeper.bookie.SkipListArena
+# skipListArenaChunkSize=4194304
 
-# Threshold of minor compaction
-# For those entry log files whose remaining size percentage reaches below
-# this threshold will be compacted in a minor compaction.
-# If it is set to less than zero, the minor compaction is disabled.
-minorCompactionThreshold=0.2
+# The max size we should allocate from the skiplist arena. Allocations
+# larger than this should be allocated directly by the VM to avoid fragmentation.
+# skipListArenaMaxAllocSize=131072
 
-# Interval to run minor compaction, in seconds
-# If it is set to less than zero, the minor compaction is disabled.
-minorCompactionInterval=3600
+# The bookie authentication provider factory class name.
+# If this is null, no authentication will take place.
+# bookieAuthProviderFactoryClass=null
 
-# Threshold of major compaction
-# For those entry log files whose remaining size percentage reaches below
-# this threshold will be compacted in a major compaction.
-# Those entry log files whose remaining size percentage is still
-# higher than the threshold will never be compacted.
-# If it is set to less than zero, the minor compaction is disabled.
-majorCompactionThreshold=0.5
+#############################################################################
+## Garbage collection settings
+#############################################################################
 
-# Interval to run major compaction, in seconds
-# If it is set to less than zero, the major compaction is disabled.
-majorCompactionInterval=86400
+# Interval to trigger the next garbage collection, in milliseconds
+# Since garbage collection is running in the background, too frequent gc
+# will hurt performance. It is better to use a higher gc
+# interval if there is enough disk capacity.
+gcWaitTime=900000
 
-# Set the maximum number of entries which can be compacted without flushing.
-# When compacting, the entries are written to the entrylog and the new offsets
-# are cached in memory. Once the entrylog is flushed the index is updated with
-# the new offsets. This parameter controls the number of entries added to the
-# entrylog before a flush is forced. A higher value for this parameter means
-# more memory will be used for offsets. Each offset consists of 3 longs.
-# This parameter should _not_ be modified unless you know what you're doing.
-# The default is 100,000.
-compactionMaxOutstandingRequests=100000
+# Interval to trigger the next garbage collection of overreplicated
+# ledgers, in milliseconds [Default: 1 day]. This should not be run very frequently
+# since we read the metadata for all the ledgers on the bookie from zk
+gcOverreplicatedLedgerWaitTime=86400000
 
-# Set the rate at which compaction will readd entries. The unit is adds per second.
-compactionRate=1000
+# Number of threads that should handle write requests. if zero, the writes would
+# be handled by netty threads directly.
+numAddWorkerThreads=0
 
-# Throttle compaction by bytes or by entries.
-isThrottleByBytes=false
+# Number of threads that should handle read requests. if zero, the reads would
+# be handled by netty threads directly.
+numReadWorkerThreads=8
 
-# Set the rate at which compaction will readd entries. The unit is adds per second.
-compactionRateByEntries=1000
+# Number of threads that should be used for high priority requests
+# (i.e. recovery reads and adds, and fencing).
+numHighPriorityWorkerThreads=8
 
-# Set the rate at which compaction will readd entries. The unit is bytes added per second.
-compactionRateByBytes=1000000
+# If read worker threads are enabled, limit the number of pending requests, to
+# avoid the executor queue growing indefinitely
+maxPendingReadRequestsPerThread=2500
+
+# If add worker threads are enabled, limit the number of pending requests, to
+# avoid the executor queue growing indefinitely
+maxPendingAddRequestsPerThread=10000
+
+# Whether force compaction is allowed when the disk is full or almost full.
+# Forcing GC may get some space back, but may also fill up disk space more quickly.
+# This is because new log files are created before GC, while old garbage
+# log files are deleted after GC.
+# isForceGCAllowWhenNoSpace=false
+
+# True if the bookie should double check readMetadata prior to gc
+# verifyMetadataOnGC=false
+
+#############################################################################
+## TLS settings
+#############################################################################
+
+# TLS Provider (JDK or OpenSSL).
+# tlsProvider=OpenSSL
+
+# The path to the class that provides security.
+# tlsProviderFactoryClass=org.apache.bookkeeper.security.SSLContextFactory
+
+# Type of security used by server.
+# tlsClientAuthentication=true
+
+# Bookie Keystore type.
+# tlsKeyStoreType=JKS
+
+# Bookie Keystore location (path).
+# tlsKeyStore=null
+
+# Bookie Keystore password path, if the keystore is protected by a password.
+# tlsKeyStorePasswordPath=null
+
+# Bookie Truststore type.
+# tlsTrustStoreType=null
+
+# Bookie Truststore location (path).
+# tlsTrustStore=null
+
+# Bookie Truststore password path, if the trust store is protected by a password.
+# tlsTrustStorePasswordPath=null
+
+#############################################################################
+## Long poll request parameter settings
+#############################################################################
+
+# The number of threads that should handle long poll requests.
+# numLongPollWorkerThreads=10
+
+# The tick duration in milliseconds for long poll requests.
+# requestTimerTickDurationMs=10
+
+# The number of ticks per wheel for the long poll request timer.
+# requestTimerNumTicks=1024
+
+#############################################################################
+## AutoRecovery settings
+#############################################################################
+
+# The interval between auditor bookie checks.
+# The auditor bookie check examines ledger metadata to see which bookies should
+# contain entries for each ledger. If a bookie which should contain entries is
+# unavailable, then the ledger containing that entry is marked for recovery.
+# Setting this to 0 disables the periodic check. Bookie checks will still
+# run when a bookie fails.
+# The interval is specified in seconds.
+auditorPeriodicBookieCheckInterval=86400
+
+# The number of entries that a replication will rereplicate in parallel.
+rereplicationEntryBatchSize=5000
+
+# Auto-replication
+# The grace period, in seconds, that the replication worker waits before fencing and
+# replicating a ledger fragment that's still being written to upon bookie failure.
+# openLedgerRereplicationGracePeriod=30
+
+# Whether the bookie itself can start auto-recovery service also or not
+# autoRecoveryDaemonEnabled=false
+
+# How long to wait, in seconds, before starting auto recovery of a lost bookie
+# lostBookieRecoveryDelay=0
+
+#############################################################################
+## Netty server settings
+#############################################################################
+
+# This setting is used to enable/disable Nagle's algorithm, which is a means of
+# improving the efficiency of TCP/IP networks by reducing the number of packets
+# that need to be sent over the network.
+# If you are sending many small messages, such that more than one can fit in
+# a single IP packet, setting server.tcpnodelay to false to enable Nagle algorithm
+# can provide better performance.
+# Default value is true.
+serverTcpNoDelay=true
+
+# This setting is used to send keep-alive messages on connection-oriented sockets.
+# serverSockKeepalive=true
+
+# The socket linger timeout on close.
+# When enabled, a close or shutdown will not return until all queued messages for
+# the socket have been successfully sent or the linger timeout has been reached.
+# Otherwise, the call returns immediately and the closing is done in the background.
+# serverTcpLinger=0
+
+# The Recv ByteBuf allocator initial buf size.
+# byteBufAllocatorSizeInitial=65536
+
+# The Recv ByteBuf allocator min buf size.
+# byteBufAllocatorSizeMin=65536
+
+# The Recv ByteBuf allocator max buf size.
+# byteBufAllocatorSizeMax=1048576
+
+#############################################################################
+## Journal settings
+#############################################################################
+
+# The journal format version to write.
+# Available formats are 1-5:
+# 1: no header
+# 2: a header section was added
+# 3: ledger key was introduced
+# 4: fencing key was introduced
+# 5: expanding header to 512 and padding writes to align sector size configured by `journalAlignmentSize`
+# By default, it is `4`. If you'd like to enable `padding-writes` feature, you can set journal version to `5`.
+# You can disable `padding-writes` by setting journal version back to `4`. This feature is available in 4.5.0
+# and onward versions.
+# journalFormatVersionToWrite=4
 
 # Max file size of journal file, in mega bytes
 # A new journal file will be created when the old one reaches the file size limitation
-#
 journalMaxSizeMB=2048
 
 # Max number of old journal files to keep
 # Keeping a number of old journal files can help data recovery in special cases
-#
 journalMaxBackups=5
 
-# How much space should we pre-allocate at a time in the journal
+# How much space should we pre-allocate at a time in the journal.
 journalPreAllocSizeMB=16
 
 # Size of the write buffers used for the journal
@@ -156,6 +301,14 @@ journalWriteBufferSizeKB=64
 # Should we remove pages from page cache after force write
 journalRemoveFromPageCache=true
 
+# Should the data be fsynced on journal before acknowledgment.
+# By default, data sync is enabled to guarantee durability of writes.
+# Beware: while disabling data sync in the Bookie journal might improve the bookie write performance, it will also
+# introduce the possibility of data loss. With no sync, the journal entries are written in the OS page cache but
+# not flushed to disk. In case of power failure, the affected bookie might lose the unflushed data. If the ledger
+# is replicated to multiple bookies, the chances of data loss are reduced though still present.
+journalSyncData=true
+
 # Should we group journal force writes, which optimize group commit
 # for higher throughput
 journalAdaptiveGroupWrites=true
@@ -163,73 +316,58 @@ journalAdaptiveGroupWrites=true
 # Maximum latency to impose on a journal write to achieve grouping
 journalMaxGroupWaitMSec=1
 
-# All the journal writes and commits should be aligned to given size
-journalAlignmentSize=4096
-
 # Maximum writes to buffer to achieve grouping
 journalBufferedWritesThreshold=524288
 
-# If we should flush the journal when journal queue is empty
-journalFlushWhenQueueEmpty=false
-
 # The number of threads that should handle journal callbacks
 numJournalCallbackThreads=8
 
-# The number of max entries to keep in fragment for re-replication
-rereplicationEntryBatchSize=5000
+# All the journal writes and commits should be aligned to given size.
+# If not, zeros will be padded to align to given size.
+# It only takes effect when journalFormatVersionToWrite is set to 5
+journalAlignmentSize=4096
 
-# How long the interval to trigger next garbage collection, in milliseconds
-# Since garbage collection is running in background, too frequent gc
-# will heart performance. It is better to give a higher number of gc
-# interval if there is enough disk capacity.
-gcWaitTime=900000
+# Maximum entries to buffer to impose on a journal write to achieve grouping.
+# journalBufferedEntriesThreshold=0
 
-# How long the interval to trigger next garbage collection of overreplicated
-# ledgers, in milliseconds [Default: 1 day]. This should not be run very frequently since we read
-# the metadata for all the ledgers on the bookie from zk
-gcOverreplicatedLedgerWaitTime=86400000
+# If we should flush the journal when journal queue is empty
+journalFlushWhenQueueEmpty=false
 
-# How long the interval to flush ledger index pages to disk, in milliseconds
-# Flushing index files will introduce much random disk I/O.
-# If separating journal dir and ledger dirs each on different devices,
-# flushing would not affect performance. But if putting journal dir
-# and ledger dirs on same device, performance degrade significantly
-# on too frequent flushing. You can consider increment flush interval
-# to get better performance, but you need to pay more time on bookie
-# server restart after failure.
-#
-flushInterval=60000
+#############################################################################
+## Ledger storage settings
+#############################################################################
 
-# Interval to watch whether bookie is dead or not, in milliseconds
+# Ledger storage implementation class
+ledgerStorageClass=org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage
+
+# Directory where BookKeeper outputs ledger snapshots
+# Multiple directories can be defined to store snapshots, separated by ','
+# For example:
+# ledgerDirectories=/tmp/bk1-data,/tmp/bk2-data
 #
-bookieDeathWatchInterval=1000
+# Ideally ledger dirs and journal dir are each on a different device,
+# which reduces the contention between random i/o and sequential write.
+# It is possible to run with a single disk, but performance will be significantly lower.
+ledgerDirectories=data/bookkeeper/ledgers
+# Directories to store index files. If not specified, will use ledgerDirectories to store.
+# indexDirectories=data/bookkeeper/ledgers
 
-## zookeeper client settings
+# Interval at which the auditor will do a check of all ledgers in the cluster.
+# By default this runs once a week. The interval is set in seconds.
+# To disable the periodic check completely, set this to 0.
+# Note that periodic checking will put extra load on the cluster, so it should
+# not be run more frequently than once a day.
+auditorPeriodicCheckInterval=604800
 
-# A list of one of more servers on which zookeeper is running.
-# The server list can be comma separated values, for example:
-# zkServers=zk1:2181,zk2:2181,zk3:2181
-zkServers=localhost:2181
-# ZooKeeper client session timeout in milliseconds
-# Bookie server will exit if it received SESSION_EXPIRED because it
-# was partitioned off from ZooKeeper for more than the session timeout
-# JVM garbage collection, disk I/O will cause SESSION_EXPIRED.
-# Increment this value could help avoiding this issue
-zkTimeout=30000
+# Whether sorted-ledger storage is enabled (default true)
+# sortedLedgerStorageEnabled=true
 
-## NIO Server settings
+# The skip list data size limitation (default 64MB) in EntryMemTable
+# skipListSizeLimit=67108864L
 
-# This settings is used to enabled/disabled Nagle's algorithm, which is a means of
-# improving the efficiency of TCP/IP networks by reducing the number of packets
-# that need to be sent over the network.
-# If you are sending many small messages, such that more than one can fit in
-# a single IP packet, setting server.tcpnodelay to false to enable Nagle algorithm
-# can provide better performance.
-# Default value is true.
-#
-serverTcpNoDelay=true
-
-## ledger cache settings
+#############################################################################
+## Ledger cache settings
+#############################################################################
 
 # Max number of ledger index files could be opened in bookie server
 # If number of ledger index files reaches this limitation, bookie
@@ -258,51 +396,40 @@ openFileLimit=0
 # the limitation of number of index pages.
 pageLimit=0
 
-#If all ledger directories configured are full, then support only read requests for clients.
-#If "readOnlyModeEnabled=true" then on all ledger disks full, bookie will be converted
-#to read-only mode and serve only read requests. Otherwise the bookie will be shutdown.
-#By default this will be disabled.
-readOnlyModeEnabled=true
+#############################################################################
+## Ledger manager settings
+#############################################################################
 
-#For each ledger dir, maximum disk space which can be used.
-#Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will
-#be written to that partition. If all ledger dir partions are full, then bookie
-#will turn to readonly mode if 'readOnlyModeEnabled=true' is set, else it will
-#shutdown.
-#Valid values should be in between 0 and 1 (exclusive).
-diskUsageThreshold=0.95
+# Ledger Manager Class
+# What kind of ledger manager is used to manage how ledgers are stored, managed
+# and garbage collected. Try to read 'BookKeeper Internals' for detail info.
+ledgerManagerFactoryClass=org.apache.bookkeeper.meta.HierarchicalLedgerManagerFactory
 
-#Disk check interval in milli seconds, interval to check the ledger dirs usage.
-#Default is 10000
-diskCheckInterval=10000
+# @Deprecated - `ledgerManagerType` is deprecated in favor of using `ledgerManagerFactoryClass`.
+# ledgerManagerType=hierarchical
 
-# Interval at which the auditor will do a check of all ledgers in the cluster.
-# By default this runs once a week. The interval is set in seconds.
-# To disable the periodic check completely, set this to 0.
-# Note that periodic checking will put extra load on the cluster, so it should
-# not be run more frequently than once a day.
-auditorPeriodicCheckInterval=604800
+# Root Zookeeper path to store ledger metadata
+# This parameter is used by zookeeper-based ledger manager as a root znode to
+# store all ledgers.
+zkLedgersRootPath=/ledgers
 
-# The interval between auditor bookie checks.
-# The auditor bookie check, checks ledger metadata to see which bookies should
-# contain entries for each ledger. If a bookie which should contain entries is
-# unavailable, then the ledger containing that entry is marked for recovery.
-# Setting this to 0 disabled the periodic check. Bookie checks will still
-# run when a bookie fails.
-# The interval is specified in seconds.
-auditorPeriodicBookieCheckInterval=86400
+#############################################################################
+## Entry log settings
+#############################################################################
 
-# number of threads that should handle write requests. if zero, the writes would
-# be handled by netty threads directly.
-numAddWorkerThreads=0
+# Max file size of entry logger, in bytes
+# A new entry log file will be created when the old one reaches the file size limitation
+logSizeLimit=1073741824
 
-# number of threads that should handle read requests. if zero, the reads would
-# be handled by netty threads directly.
-numReadWorkerThreads=8
+# Enable/Disable entry logger preallocation
+entryLogFilePreallocationEnabled=true
 
-# If read workers threads are enabled, limit the number of pending requests, to
-# avoid the executor queue to grow indefinitely
-maxPendingReadRequestsPerThread=2500
+# Entry log flush interval in bytes.
+# Default is 0. 0 or less disables this feature; effectively, the flush
+# happens on log rotation.
+# Flushing in smaller chunks but more frequently reduces spikes in disk
+# I/O. Flushing too frequently may also affect performance negatively.
+# flushEntrylogBytes=0
 
 # The number of bytes we should use as capacity for BufferedReadChannel. Default is 512 bytes.
 readBufferSizeBytes=4096
@@ -310,28 +437,180 @@ readBufferSizeBytes=4096
 # The number of bytes used as capacity for the write buffer. Default is 64KB.
 writeBufferSizeBytes=65536
 
-# Whether the bookie should use its hostname to register with the
-# co-ordination service(eg: zookeeper service).
-# When false, bookie will use its ipaddress for the registration.
-# Defaults to false.
-useHostNameAsBookieID=false
+# Specifies if entryLog per ledger is enabled/disabled. If it is enabled, then there would be an
+# active entrylog for each ledger. It would be ideal to enable this feature if the underlying
+# storage device has multiple DiskPartitions or SSD and if in a given moment, entries of fewer
+# number of active ledgers are written to a bookie.
+# entryLogPerLedgerEnabled=false
+
+#############################################################################
+## Entry log compaction settings
+#############################################################################
+
+# Set the rate at which compaction will readd entries. The unit is adds per second.
+compactionRate=1000
+
+# If bookie is using hostname for registration and in ledger metadata then
+# whether to use short hostname or FQDN hostname. Defaults to false.
+# useShortHostName=false
+
+# Threshold of minor compaction
+# Entry log files whose remaining size percentage falls below
+# this threshold will be compacted in a minor compaction.
+# If it is set to less than zero, the minor compaction is disabled.
+minorCompactionThreshold=0.2
+
+# Interval to run minor compaction, in seconds
+# If it is set to less than zero, the minor compaction is disabled.
+minorCompactionInterval=3600
+
+# Set the maximum number of entries which can be compacted without flushing.
+# When compacting, the entries are written to the entrylog and the new offsets
+# are cached in memory. Once the entrylog is flushed the index is updated with
+# the new offsets. This parameter controls the number of entries added to the
+# entrylog before a flush is forced. A higher value for this parameter means
+# more memory will be used for offsets. Each offset consists of 3 longs.
+# This parameter should _not_ be modified unless you know what you're doing.
+# The default is 100,000.
+compactionMaxOutstandingRequests=100000
+
+# Threshold of major compaction
+# Entry log files whose remaining size percentage falls below
+# this threshold will be compacted in a major compaction.
+# Those entry log files whose remaining size percentage is still
+# higher than the threshold will never be compacted.
+# If it is set to less than zero, the major compaction is disabled.
+majorCompactionThreshold=0.5
+
+# Interval to run major compaction, in seconds
+# If it is set to less than zero, the major compaction is disabled.
+majorCompactionInterval=86400
 
-# Stats Provider Class
+# Throttle compaction by bytes or by entries.
+isThrottleByBytes=false
+
+# Set the rate at which compaction will readd entries. The unit is adds per second.
+compactionRateByEntries=1000
+
+# Set the rate at which compaction will readd entries. The unit is bytes added per second.
+compactionRateByBytes=1000000
+
+#############################################################################
+## Statistics
+#############################################################################
+
+# Whether statistics are enabled
+# enableStatistics=true
+
+# Stats Provider Class (if statistics are enabled)
 statsProviderClass=org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
+
 # Default port for Prometheus metrics exporter
 prometheusStatsHttpPort=8000
 
+#############################################################################
+## Read-only mode support
+#############################################################################
+
+# If all ledger directories configured are full, then support only read requests for clients.
+# If "readOnlyModeEnabled=true" then when all ledger disks are full, the bookie will be converted
+# to read-only mode and serve only read requests. Otherwise the bookie will be shut down.
+# By default this will be disabled.
+readOnlyModeEnabled=true
+
+# Whether the bookie is force started in read only mode or not
+# forceReadOnlyBookie=false
+
+# Persist the bookie status locally on the disks, so the bookies can keep their status upon restarts
+# @Since 4.6
+# persistBookieStatusEnabled=false
+
+#############################################################################
+## Disk utilization
+#############################################################################
+
+# For each ledger dir, maximum disk space which can be used.
+# Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will
+# be written to that partition. If all ledger dir partitions are full, then the bookie
+# will turn to read-only mode if 'readOnlyModeEnabled=true' is set, else it will
+# shut down.
+# Valid values should be in between 0 and 1 (exclusive).
+diskUsageThreshold=0.95
+
+# The disk free space low water mark threshold.
+# Disk is considered full when usage threshold is exceeded.
+# Disk returns back to non-full state when usage is below low water mark threshold.
+# This prevents it from going back and forth between these states frequently
+# when concurrent writes and compaction are happening. This also prevents the bookie from
+# switching frequently between read-only and read-write states in the same cases.
+# diskUsageWarnThreshold=0.95
+
+# Set the disk free space low water mark threshold. Disk is considered full when
+# usage threshold is exceeded. Disk returns back to non-full state when usage is
+# below low water mark threshold. This prevents it from going back and forth
+# between these states frequently when concurrent writes and compaction are
+# happening. This also prevents the bookie from switching frequently between
+# read-only and read-write states in the same cases.
+# diskUsageLwmThreshold=0.90
+
+# Disk check interval in milli seconds, interval to check the ledger dirs usage.
+# Default is 10000
+diskCheckInterval=10000
 
+#############################################################################
+## ZooKeeper parameters
+#############################################################################
+
+# A list of one or more servers on which ZooKeeper is running.
+# The server list can be comma separated values, for example:
+# zkServers=zk1:2181,zk2:2181,zk3:2181
+zkServers={{ zookeeper_servers }}
+
+# ZooKeeper client session timeout in milliseconds
+# The bookie server will exit if it receives SESSION_EXPIRED because it
+# was partitioned off from ZooKeeper for more than the session timeout.
+# JVM garbage collection or disk I/O can cause SESSION_EXPIRED.
+# Increasing this value could help avoid this issue
+zkTimeout=30000
+
+# The Zookeeper client backoff retry start time in millis.
+# zkRetryBackoffStartMs=1000
+
+# The Zookeeper client backoff retry max time in millis.
+# zkRetryBackoffMaxMs=10000
+
+# Set ACLs on every node written on ZooKeeper; this way only allowed users
+# will be able to read and write BookKeeper metadata stored on ZooKeeper.
+# In order to make ACLs work you need to set up ZooKeeper JAAS authentication;
+# all the bookies and clients need to share the same user, and this is usually
+# done using Kerberos authentication. See the ZooKeeper documentation.
+zkEnableSecurity=false
+
+#############################################################################
+## Server parameters
+#############################################################################
+
+# Configure a list of server components to enable and load on a bookie server.
+# This provides a plugin mechanism to run extra services along with a bookie server.
+#
+# extraServerComponents=
+
+
+#############################################################################
 ## DB Ledger storage configuration
+#############################################################################
+
+# These configs are used when the selected 'ledgerStorageClass' is
+# org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage
 
 # Size of Write Cache. Memory is allocated from JVM direct memory.
 # Write cache is used to buffer entries before flushing into the entry log
 # For good performance, it should be big enough to hold a substantial amount of entries in the flush interval
-# dbStorage_writeCacheMaxSizeMb=512
+dbStorage_writeCacheMaxSizeMb=512
 
 # Size of Read cache. Memory is allocated from JVM direct memory.
 # This read cache is pre-filled doing read-ahead whenever a cache miss happens
-# dbStorage_readAheadCacheMaxSizeMb=256
+dbStorage_readAheadCacheMaxSizeMb=256
 
 # How many entries to pre-fill in cache after a read cache miss
 dbStorage_readAheadCacheBatchSize=1000
@@ -343,9 +622,10 @@ dbStorage_readAheadCacheBatchSize=1000
 # Size of RocksDB block-cache. For best performance, this cache
 # should be big enough to hold a significant portion of the index
 # database which can reach ~2GB in some cases
-# 256 MBytes
-# dbStorage_rocksDB_blockCacheSize=268435456
+# Default is 256 MBytes
+dbStorage_rocksDB_blockCacheSize=268435456
 
+# Other RocksDB specific tunables
 dbStorage_rocksDB_writeBufferSizeMB=64
 dbStorage_rocksDB_sstSizeInMB=64
 dbStorage_rocksDB_blockSize=65536
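
The three DbLedgerStorage caches set above are all allocated from JVM direct memory (as the
template comments note), so their sum is the minimum direct-memory budget a bookie needs for
ledger storage alone; Netty buffers come on top of that. A small illustrative calculation,
not part of the PR:

    # Illustrative arithmetic: sum of the DbLedgerStorage caches configured above.
    MB = 1024 * 1024

    write_cache_mb = 512                   # dbStorage_writeCacheMaxSizeMb
    read_ahead_cache_mb = 256              # dbStorage_readAheadCacheMaxSizeMb
    rocksdb_block_cache_bytes = 268435456  # dbStorage_rocksDB_blockCacheSize

    total_mb = write_cache_mb + read_ahead_cache_mb + rocksdb_block_cache_bytes // MB
    print(f"DbLedgerStorage caches alone need {total_mb} MB of direct memory")  # -> 1024 MB
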
diff --git a/deployment/terraform-ansible/templates/broker.conf b/deployment/terraform-ansible/templates/broker.conf
index d16810025b..73d4e2f524 100644
--- a/deployment/terraform-ansible/templates/broker.conf
+++ b/deployment/terraform-ansible/templates/broker.conf
@@ -17,18 +17,492 @@
 # under the License.
 #
 
-
-
-### Use all the broker defaults except for the following
+### --- General broker settings --- ###
 
 # Zookeeper quorum connection string
 zookeeperServers={{ zookeeper_servers }}
 
-# Global Zookeeper quorum connection string
+# Configuration Store connection string
 configurationStoreServers={{ zookeeper_servers }}
 
+# Broker data port
+brokerServicePort=6650
+
+# Broker data port for TLS
+brokerServicePortTls=6651
+
+# Port to use to serve HTTP requests
+webServicePort=8080
+
+# Port to use to serve HTTPS requests
+webServicePortTls=8443
+
+# Hostname or IP address the service binds on, default is 0.0.0.0.
+bindAddress=0.0.0.0
+
 # Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used.
 advertisedAddress={{ hostvars[inventory_hostname].public_ip }}
 
 # Name of the cluster to which this broker belongs
 clusterName=local
+
+# Enable cluster's failure-domain which can distribute brokers into logical region
+failureDomainsEnabled=false
+
+# Zookeeper session timeout in milliseconds
+zooKeeperSessionTimeoutMillis=30000
+
+# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed
+brokerShutdownTimeoutMs=3000
+
+# Enable backlog quota check. Enforces action on topic when the quota is reached
+backlogQuotaCheckEnabled=true
+
+# How often to check for topics that have reached the quota
+backlogQuotaCheckIntervalInSeconds=60
+
+# Default per-topic backlog quota limit
+backlogQuotaDefaultLimitGB=10
+
+# Enable the deletion of inactive topics
+brokerDeleteInactiveTopicsEnabled=true
+
+# How often to check for inactive topics
+brokerDeleteInactiveTopicsFrequencySeconds=60
+
+# How frequently to proactively check and purge expired messages
+messageExpiryCheckIntervalInMinutes=5
+
+# How long to delay rewinding cursor and dispatching messages when active consumer is changed
+activeConsumerFailoverDelayTimeMillis=1000
+
+# How long after the last consumption before deleting an inactive subscription
+# When it is 0, inactive subscriptions are not deleted automatically
+subscriptionExpirationTimeMinutes=0
+
+# How frequently to proactively check and purge expired subscription
+subscriptionExpiryCheckIntervalInMinutes=5
+
+# Set the default behavior for message deduplication in the broker
+# This can be overridden per-namespace. If enabled, broker will reject
+# messages that were already stored in the topic
+brokerDeduplicationEnabled=false
+
+# Maximum number of producer information entries that are going to be
+# persisted for deduplication purposes
+brokerDeduplicationMaxNumberOfProducers=10000
+
+# Number of entries after which a dedup info snapshot is taken.
+# A larger interval will lead to fewer snapshots being taken, though it would
+# increase the topic recovery time when the entries published after the
+# snapshot need to be replayed.
+brokerDeduplicationEntriesInterval=1000
+
+# Time of inactivity after which the broker will discard the deduplication information
+# relative to a disconnected producer. Default is 6 hours.
+brokerDeduplicationProducerInactivityTimeoutMinutes=360
+
+# When a namespace is created without specifying the number of bundles, this
+# value will be used as the default
+defaultNumberOfNamespaceBundles=4
+
+# Enable check for minimum allowed client library version
+clientLibraryVersionCheckEnabled=false
+
+# Path for the file used to determine the rotation status for the broker when responding
+# to service discovery health checks
+statusFilePath=
+
+# If true, (and ModularLoadManagerImpl is being used), the load manager will attempt to
+# use only brokers running the latest software version (to minimize impact to bundles)
+preferLaterVersions=false
+
+# Max number of unacknowledged messages allowed for a consumer on a shared subscription. The broker will stop sending
+# messages to the consumer once this limit is reached, until the consumer starts acknowledging messages back.
+# Using a value of 0 disables the unackedMessage limit check and the consumer can receive messages without any restriction
+maxUnackedMessagesPerConsumer=50000
+
+# Max number of unacknowledged messages allowed per shared subscription. The broker will stop dispatching messages to
+# all consumers of the subscription once this limit is reached, until consumers start acknowledging messages back and
+# the unacked count drops to limit/2. Using a value of 0 disables the unackedMessage-limit
+# check and the dispatcher can dispatch messages without any restriction
+maxUnackedMessagesPerSubscription=200000
+
+# Max number of unacknowledged messages allowed per broker. Once this limit is reached, the broker will stop dispatching
+# messages to all shared subscriptions which have a higher number of unacked messages, until subscriptions start
+# acknowledging messages back and the unacked count drops to limit/2. Using a value of 0 disables the
+# unackedMessage-limit check and the broker doesn't block dispatchers
+maxUnackedMessagesPerBroker=0
+
+# Once the broker reaches the maxUnackedMessagesPerBroker limit, it blocks subscriptions which have more unacked messages
+# than this percentage limit, and the subscription will not receive any new messages until that subscription acks back
+# limit/2 messages
+maxUnackedMessagesPerSubscriptionOnBrokerBlocked=0.16
+
+# Default messages per second dispatch throttling-limit for every topic. Using a value of 0 disables the default
+# message dispatch-throttling
+dispatchThrottlingRatePerTopicInMsg=0
+
+# Default bytes per second dispatch throttling-limit for every topic. Using a value of 0 disables the
+# default message-byte dispatch-throttling
+dispatchThrottlingRatePerTopicInByte=0
+
+# By default we enable dispatch-throttling for both caught up consumers as well as consumers who have
+# backlog.
+dispatchThrottlingOnNonBacklogConsumerEnabled=true
+
+# Max number of concurrent lookup requests the broker allows, to throttle heavy incoming lookup traffic
+maxConcurrentLookupRequest=10000
+
+# Max number of concurrent topic loading requests the broker allows, to control the number of zk-operations
+maxConcurrentTopicLoadRequest=5000
+
+# Max concurrent non-persistent messages that can be processed per connection
+maxConcurrentNonPersistentMessagePerConnection=1000
+
+# Number of worker threads to serve non-persistent topic
+numWorkerThreadsForNonPersistentTopic=8
+
+# Enable broker to load persistent topics
+enablePersistentTopics=true
+
+# Enable broker to load non-persistent topics
+enableNonPersistentTopics=true
+
+# Enable to run bookie along with broker
+enableRunBookieTogether=false
+
+# Enable to run bookie autorecovery along with broker
+enableRunBookieAutoRecoveryTogether=false
+
+# Max number of producers allowed to connect to a topic. Once this limit is reached, the broker will reject new producers
+# until the number of connected producers decreases.
+# Using a value of 0 disables the maxProducersPerTopic-limit check.
+maxProducersPerTopic=0
+
+# Max number of consumers allowed to connect to a topic. Once this limit is reached, the broker will reject new consumers
+# until the number of connected consumers decreases.
+# Using a value of 0 disables the maxConsumersPerTopic-limit check.
+maxConsumersPerTopic=0
+
+# Max number of consumers allowed to connect to a subscription. Once this limit is reached, the broker will reject new consumers
+# until the number of connected consumers decreases.
+# Using a value of 0 disables the maxConsumersPerSubscription-limit check.
+maxConsumersPerSubscription=0
+
+### --- Authentication --- ###
+# Role names that are treated as "proxy roles". If the broker sees a request with
+# a role in proxyRoles, it will demand to see a valid original principal.
+proxyRoles=
+
+# If this flag is set then the broker authenticates the original Auth data
+# else it just accepts the originalPrincipal and authorizes it (if required).
+authenticateOriginalAuthData=false
+
+# Enable TLS
+tlsEnabled=false
+
+# Path for the TLS certificate file
+tlsCertificateFilePath=
+
+# Path for the TLS private key file
+tlsKeyFilePath=
+
+# Path for the trusted TLS certificate file
+tlsTrustCertsFilePath=
+
+# Accept untrusted TLS certificate from client
+tlsAllowInsecureConnection=false
+
+# Specify whether Client certificates are required for TLS
+# Reject the Connection if the Client Certificate is not trusted.
+tlsRequireTrustedClientCertOnConnect=false
+### --- Authentication --- ###
+
+# Enable authentication
+authenticationEnabled=false
+
+# Authentication provider name list, which is a comma-separated list of class names
+authenticationProviders=
+
+# Enforce authorization
+authorizationEnabled=false
+
+# Authorization provider fully qualified class-name
+authorizationProvider=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
+
+# Allow wildcard matching in authorization
+# (wildcard matching only applicable if the wildcard-char:
+# * is present at first or last position eg: *.pulsar.service, pulsar.service.*)
+authorizationAllowWildcardsMatching=false
+
+# Role names that are treated as "super-user", meaning they will be able to do all admin
+# operations and publish/consume from all topics
+superUserRoles=
+
+# Authentication settings of the broker itself. Used when the broker connects to other brokers,
+# either in same or other clusters
+brokerClientAuthenticationPlugin=
+brokerClientAuthenticationParameters=
+brokerClientTrustCertsFilePath=
+
+# Supported Athenz provider domain names(comma separated) for authentication
+athenzDomainNames=
+
+# When this parameter is not empty, unauthenticated users perform as anonymousUserRole
+anonymousUserRole=
+
+### --- BookKeeper Client --- ###
+
+# Authentication plugin to use when connecting to bookies
+bookkeeperClientAuthenticationPlugin=
+
+# BookKeeper auth plugin implementation-specific parameter names and values
+bookkeeperClientAuthenticationParametersName=
+bookkeeperClientAuthenticationParameters=
+
+# Timeout for BK add / read operations
+bookkeeperClientTimeoutInSeconds=30
+
+# Speculative reads are initiated if a read request doesn't complete within a certain time.
+# Using a value of 0 disables speculative reads
+bookkeeperClientSpeculativeReadTimeoutInMillis=0
+
+# Enable bookies health check. Bookies that have more than the configured number of failures within
+# the interval will be quarantined for some time. During this period, new ledgers won't be created
+# on these bookies
+bookkeeperClientHealthCheckEnabled=true
+bookkeeperClientHealthCheckIntervalSeconds=60
+bookkeeperClientHealthCheckErrorThresholdPerInterval=5
+bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800
+
+# Enable rack-aware bookie selection policy. BK will choose bookies from different racks when
+# forming a new bookie ensemble
+bookkeeperClientRackawarePolicyEnabled=true
+
+# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie
+# outside the specified groups will not be used by the broker
+bookkeeperClientIsolationGroups=
+
+### --- Managed Ledger --- ###
+
+# Number of bookies to use when creating a ledger
+managedLedgerDefaultEnsembleSize=2
+
+# Number of copies to store for each message
+managedLedgerDefaultWriteQuorum=2
+
+# Number of guaranteed copies (acks to wait before write is complete)
+managedLedgerDefaultAckQuorum=2
+
+# Default type of checksum to use when writing to BookKeeper. Default is "CRC32"
+# Other possible options are "CRC32C" (which is faster), "MAC" or "DUMMY" (no checksum).
+managedLedgerDigestType=CRC32
+
+# Amount of memory to use for caching data payload in managed ledger. This memory
+# is allocated from JVM direct memory and it's shared across all the topics
+# running  in the same broker
+managedLedgerCacheSizeMB=1024
+
+# Threshold to which bring down the cache level when eviction is triggered
+managedLedgerCacheEvictionWatermark=0.9
+
+# Rate limit the amount of writes per second generated by consumer acking the messages
+managedLedgerDefaultMarkDeleteRateLimit=1.0
+
+# Max number of entries to append to a ledger before triggering a rollover
+# A ledger rollover is triggered on these conditions
+#  * Either the max rollover time has been reached
+#  * or max entries have been written to the ledger and at least min-time
+#    has passed
+managedLedgerMaxEntriesPerLedger=50000
+
+# Minimum time between ledger rollover for a topic
+managedLedgerMinLedgerRolloverTimeMinutes=10
+
+# Maximum time before forcing a ledger rollover for a topic
+managedLedgerMaxLedgerRolloverTimeMinutes=240
+
+# Delay between a ledger being successfully offloaded to long term storage
+# and the ledger being deleted from bookkeeper (default is 4 hours)
+managedLedgerOffloadDeletionLagMs=14400000
+
+# Max number of entries to append to a cursor ledger
+managedLedgerCursorMaxEntriesPerLedger=50000
+
+# Max time before triggering a rollover on a cursor ledger
+managedLedgerCursorRolloverTimeInSeconds=14400
+
+# Max number of "acknowledgment holes" that are going to be persistently stored.
+# When acknowledging out of order, a consumer will leave holes that are supposed
+# to be quickly filled by acking all the messages. The information of which
+# messages are acknowledged is persisted by compressing in "ranges" of messages
+# that were acknowledged. After the max number of ranges is reached, the information
+# will only be tracked in memory and messages will be redelivered in case of
+# crashes.
+managedLedgerMaxUnackedRangesToPersist=10000
+
+# Max number of "acknowledgment holes" that can be stored in ZooKeeper. If the number of unacked message ranges is higher
+# than this limit, then the broker will persist unacked ranges into BookKeeper to avoid additional data overhead in
+# ZooKeeper.
+managedLedgerMaxUnackedRangesToPersistInZooKeeper=1000
+
+# Skip reading non-recoverable/unreadable data-ledgers under a managed-ledger's list. It helps when data-ledgers get
+# corrupted at bookkeeper and the managed-cursor is stuck at that ledger.
+autoSkipNonRecoverableData=false
+
+### --- Load balancer --- ###
+
+# Enable load balancer
+loadBalancerEnabled=true
+
+# Percentage of change to trigger load report update
+loadBalancerReportUpdateThresholdPercentage=10
+
+# maximum interval to update load report
+loadBalancerReportUpdateMaxIntervalMinutes=15
+
+# Frequency of report to collect
+loadBalancerHostUsageCheckIntervalMinutes=1
+
+# Enable/disable automatic bundle unloading for load-shedding
+loadBalancerSheddingEnabled=true
+
+# Load shedding interval. Broker periodically checks whether some traffic should be offloaded from
+# some over-loaded broker to other under-loaded brokers
+loadBalancerSheddingIntervalMinutes=1
+
+# Prevent the same topics from being shed and moved to other brokers more than once within this timeframe
+loadBalancerSheddingGracePeriodMinutes=30
+
+# Usage threshold to allocate max number of topics to broker
+loadBalancerBrokerMaxTopics=50000
+
+# Usage threshold to determine a broker as over-loaded
+loadBalancerBrokerOverloadedThresholdPercentage=85
+
+# Interval to flush dynamic resource quota to ZooKeeper
+loadBalancerResourceQuotaUpdateIntervalMinutes=15
+
+# enable/disable namespace bundle auto split
+loadBalancerAutoBundleSplitEnabled=true
+
+# enable/disable automatic unloading of split bundles
+loadBalancerAutoUnloadSplitBundlesEnabled=true
+
+# maximum topics in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxTopics=1000
+
+# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxSessions=1000
+
+# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxMsgRate=30000
+
+# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxBandwidthMbytes=100
+
+# maximum number of bundles in a namespace
+loadBalancerNamespaceMaximumBundles=128
+
+# Override the auto-detection of the network interfaces max speed.
+# This option is useful in some environments (eg: EC2 VMs) where the max speed
+# reported by Linux is not reflecting the real bandwidth available to the broker.
+# Since the network usage is employed by the load manager to decide when a broker
+# is overloaded, it is important to make sure the info is correct or override it
+# with the right value here. The configured value can be a double (eg: 0.8) and that
+# can be used to trigger load-shedding even before hitting on NIC limits.
+loadBalancerOverrideBrokerNicSpeedGbps=
+
+# Name of load manager to use
+loadManagerClassName=org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl
+
+### --- Replication --- ###
+
+# Enable replication metrics
+replicationMetricsEnabled=true
+
+# Max number of connections to open for each broker in a remote cluster
+# More connections host-to-host lead to better throughput over high-latency
+# links.
+replicationConnectionsPerBroker=16
+
+# Replicator producer queue size
+replicationProducerQueueSize=1000
+
+# Replicator prefix used for replicator producer name and cursor name
+replicatorPrefix=pulsar.repl
+
+# Enable TLS when talking with other clusters to replicate messages
+replicationTlsEnabled=false
+
+# Default message retention time
+defaultRetentionTimeInMinutes=0
+
+# Default retention size
+defaultRetentionSizeInMB=0
+
+# How often to check whether the connections are still alive
+keepAliveIntervalSeconds=30
+
+# How often broker checks for inactive topics to be deleted (topics with no subscriptions and no one connected)
+brokerServicePurgeInactiveFrequencyInSeconds=60
+
+### --- WebSocket --- ###
+
+# Enable the WebSocket API service in broker
+webSocketServiceEnabled=false
+
+# Number of IO threads in Pulsar Client used in WebSocket proxy
+webSocketNumIoThreads=8
+
+# Number of connections per Broker in Pulsar Client used in WebSocket proxy
+webSocketConnectionsPerBroker=8
+
+
+### --- Metrics --- ###
+
+# Enable topic level metrics
+exposeTopicLevelMetricsInPrometheus=true
+
+### --- Functions --- ###
+
+# Enable Functions Worker Service in Broker
+functionsWorkerEnabled=false
+
+### --- Broker Web Stats --- ###
+
+# Enable topic level metrics
+exposePublisherStats=true
+
+### --- Schema storage --- ###
+# The schema storage implementation used by this broker
+schemaRegistryStorageClassName=org.apache.pulsar.broker.service.schema.BookkeeperSchemaStorageFactory
+
+### --- Ledger Offloading --- ###
+
+# Driver to use to offload old data to long term storage (Possible values: S3)
+managedLedgerOffloadDriver=
+
+# Maximum number of thread pool threads for ledger offloading
+managedLedgerOffloadMaxThreads=2
+
+# For Amazon S3 ledger offload, AWS region
+s3ManagedLedgerOffloadRegion=
+
+# For Amazon S3 ledger offload, Bucket to place offloaded ledger into
+s3ManagedLedgerOffloadBucket=
+
+# For Amazon S3 ledger offload, Alternative endpoint to connect to (useful for testing)
+s3ManagedLedgerOffloadServiceEndpoint=
+
+# For Amazon S3 ledger offload, Max block size in bytes. (64MB by default, 5MB minimum)
+s3ManagedLedgerOffloadMaxBlockSizeInBytes=67108864
+
+# For Amazon S3 ledger offload, Read buffer size in bytes (1MB by default)
+s3ManagedLedgerOffloadReadBufferSizeInBytes=1048576
+
+### --- Deprecated config variables --- ###
+
+# Deprecated. Use configurationStoreServers
+globalZookeeperServers={{ zookeeper_servers }}
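
One invariant to keep in mind when editing the managed-ledger defaults above is that the ack
quorum must not exceed the write quorum, which in turn must not exceed the ensemble size. A
minimal sketch that loads a broker.conf-style properties file and checks this; the file path
and helper function are illustrative, not part of the PR:

    # Illustrative sketch: load key=value pairs from a broker.conf-style file and
    # verify ackQuorum <= writeQuorum <= ensembleSize for the managed-ledger defaults.
    def load_properties(path):
        props = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#") or "=" not in line:
                    continue
                key, _, value = line.partition("=")
                props[key.strip()] = value.strip()
        return props

    conf = load_properties("broker.conf")  # path is an assumption
    ensemble = int(conf.get("managedLedgerDefaultEnsembleSize", "2"))
    write_q = int(conf.get("managedLedgerDefaultWriteQuorum", "2"))
    ack_q = int(conf.get("managedLedgerDefaultAckQuorum", "2"))

    if not (ack_q <= write_q <= ensemble):
        raise ValueError(f"inconsistent quorums: ack={ack_q} write={write_q} ensemble={ensemble}")
    print(f"ensemble={ensemble}, write quorum={write_q}, ack quorum={ack_q}: OK")
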
diff --git a/deployment/terraform-ansible/templates/client.conf b/deployment/terraform-ansible/templates/client.conf
index 81c65772af..cfba8224cb 100644
--- a/deployment/terraform-ansible/templates/client.conf
+++ b/deployment/terraform-ansible/templates/client.conf
@@ -17,8 +17,12 @@
 # under the License.
 #
 
-
-# Pulsar Client configuration
+# Pulsar Client and pulsar-admin configuration
 webServiceUrl=http://{{ hostvars[groups['pulsar'][0]].private_ip }}:8080/
-
 brokerServiceUrl=pulsar://{{ hostvars[groups['pulsar'][0]].private_ip }}:6650/
+#authPlugin=
+#authParams=
+#useTls=
+tlsAllowInsecureConnection=false
+tlsEnableHostnameVerification=false
+#tlsTrustCertsFilePath
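
The brokerServiceUrl that client.conf points the CLI tools at is the same URL an application
client would use. A minimal sketch with the Python client (pip package pulsar-client), assuming
an unauthenticated cluster as configured by these templates; the host and topic are placeholders:

    # Illustrative producer using the same pulsar:// URL that client.conf configures
    # for the CLI tools. Replace <pulsar-host> with a real broker address.
    import pulsar

    client = pulsar.Client("pulsar://<pulsar-host>:6650")  # brokerServiceUrl
    producer = client.create_producer("persistent://public/default/test-topic")
    producer.send(b"hello from the terraform-ansible deployment")
    client.close()
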
diff --git a/deployment/terraform-ansible/templates/pulsar_env.sh b/deployment/terraform-ansible/templates/pulsar_env.sh
index d6fb45fddf..4ad6e7b7da 100644
--- a/deployment/terraform-ansible/templates/pulsar_env.sh
+++ b/deployment/terraform-ansible/templates/pulsar_env.sh
@@ -45,10 +45,7 @@
 PULSAR_MEM=" -Xms{{ max_heap_memory }} -Xmx{{ max_heap_memory }} -XX:MaxDirectMemorySize={{ max_direct_memory }}"
 
 # Garbage collection options
-# PULSAR_GC=" -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB"
-PULSAR_GC=" -XX:+UseShenandoahGC -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis "
-PULSAR_GC="${PULSAR_GC} -XX:+PerfDisableSharedMem -XX:+AlwaysPreTouch -XX:-UseBiasedLocking"
-PULSAR_GC="${PULSAR_GC} -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=30m -Xloggc:/dev/shm/gc_%p.log"
+PULSAR_GC=" -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB"
 
 # Extra options to be passed to the jvm
 PULSAR_EXTRA_OPTS="${PULSAR_EXTRA_OPTS} ${PULSAR_MEM} ${PULSAR_GC} -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.maxCapacity.default=1000 -Dio.netty.recycler.linkCapacity=1024"
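
The pulsar_env.sh change above swaps the Shenandoah flags back to a single G1 configuration. A
trivial sanity check, illustrative only, that a PULSAR_GC string enables exactly one collector
(the flag string is copied from the new template; the check itself is not part of the deployment):

    # Illustrative only: confirm a GC flag string selects exactly one collector.
    pulsar_gc = ("-XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled "
                 "-XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis "
                 "-XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 "
                 "-XX:+DisableExplicitGC -XX:-ResizePLAB")

    collectors = [flag for flag in pulsar_gc.split()
                  if flag.startswith("-XX:+Use") and flag.endswith("GC")]
    assert len(collectors) == 1, f"expected exactly one collector flag, got {collectors}"
    print("GC flags OK:", collectors[0])
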


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services