Posted to commits@pulsar.apache.org by mm...@apache.org on 2019/05/18 09:11:14 UTC

[pulsar] branch asf-site updated: Updated site at revision ce685dc

This is an automated email from the ASF dual-hosted git repository.

mmerli pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/pulsar.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new 59984c4  Updated site at revision ce685dc
59984c4 is described below

commit 59984c42608eefc3a1b09a4253e986619a029cdb
Author: jenkins <bu...@apache.org>
AuthorDate: Sat May 18 09:11:05 2019 +0000

    Updated site at revision ce685dc
---
 content/docs/en/next/reference-configuration.html  |  6 +--
 .../en/next/reference-configuration/index.html     |  6 +--
 content/docs/fr/next/reference-configuration.html  |  6 +--
 .../fr/next/reference-configuration/index.html     |  6 +--
 content/docs/ja/next/reference-configuration.html  |  6 +--
 .../ja/next/reference-configuration/index.html     |  6 +--
 content/docs/zh-CN/2.1.0-incubating/io-kafka.html  |  2 +-
 .../zh-CN/2.1.0-incubating/io-kafka/index.html     |  2 +-
 content/docs/zh-CN/2.1.1-incubating/io-kafka.html  |  2 +-
 .../zh-CN/2.1.1-incubating/io-kafka/index.html     |  2 +-
 content/docs/zh-CN/2.2.0/io-kafka.html             |  2 +-
 content/docs/zh-CN/2.2.0/io-kafka/index.html       |  2 +-
 content/docs/zh-CN/2.2.1/io-kafka.html             |  2 +-
 content/docs/zh-CN/2.2.1/io-kafka/index.html       |  2 +-
 content/docs/zh-CN/2.3.0/io-kafka.html             |  2 +-
 content/docs/zh-CN/2.3.0/io-kafka/index.html       |  2 +-
 content/docs/zh-CN/io-kafka.html                   |  2 +-
 content/docs/zh-CN/io-kafka/index.html             |  2 +-
 content/docs/zh-CN/next/io-kafka.html              |  2 +-
 content/docs/zh-CN/next/io-kafka/index.html        |  2 +-
 .../docs/zh-CN/next/reference-configuration.html   |  6 +--
 .../zh-CN/next/reference-configuration/index.html  |  6 +--
 content/swagger/swagger.json                       | 48 ++++++++++-----------
 content/swagger/swaggerfunctions.json              | 49 +++++++++++++++-------
 24 files changed, 96 insertions(+), 77 deletions(-)

diff --git a/content/docs/en/next/reference-configuration.html b/content/docs/en/next/reference-configuration.html
index 890d265..30a160e 100644
--- a/content/docs/en/next/reference-configuration.html
+++ b/content/docs/en/next/reference-configuration.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
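
Note: the hunk above (and the identical hunks below for the fr, ja and zh-CN copies of the page) documents new DbLedgerStorage defaults: the write cache, read-ahead cache and RocksDB block cache now default to 25%, 25% and 10% of the JVM direct memory instead of fixed sizes. A minimal bookkeeper.conf sketch, using only the property names already shown in the table; leaving the properties unset accepts the new percentage-based defaults, while explicit values (the old fixed defaults are shown) are still honoured:

    # Unset = new defaults: 25% / 25% / 10% of -XX:MaxDirectMemorySize.
    # The old fixed defaults, if explicit sizes are preferred:
    dbStorage_writeCacheMaxSizeMb=512
    dbStorage_readAheadCacheMaxSizeMb=256
    dbStorage_rocksDB_blockCacheSize=268435456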
diff --git a/content/docs/en/next/reference-configuration/index.html b/content/docs/en/next/reference-configuration/index.html
index 890d265..30a160e 100644
--- a/content/docs/en/next/reference-configuration/index.html
+++ b/content/docs/en/next/reference-configuration/index.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
diff --git a/content/docs/fr/next/reference-configuration.html b/content/docs/fr/next/reference-configuration.html
index e5e2b86..7782ee2 100644
--- a/content/docs/fr/next/reference-configuration.html
+++ b/content/docs/fr/next/reference-configuration.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
diff --git a/content/docs/fr/next/reference-configuration/index.html b/content/docs/fr/next/reference-configuration/index.html
index e5e2b86..7782ee2 100644
--- a/content/docs/fr/next/reference-configuration/index.html
+++ b/content/docs/fr/next/reference-configuration/index.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
diff --git a/content/docs/ja/next/reference-configuration.html b/content/docs/ja/next/reference-configuration.html
index bdb6d86..541ba06 100644
--- a/content/docs/ja/next/reference-configuration.html
+++ b/content/docs/ja/next/reference-configuration.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
diff --git a/content/docs/ja/next/reference-configuration/index.html b/content/docs/ja/next/reference-configuration/index.html
index bdb6d86..541ba06 100644
--- a/content/docs/ja/next/reference-configuration/index.html
+++ b/content/docs/ja/next/reference-configuration/index.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
diff --git a/content/docs/zh-CN/2.1.0-incubating/io-kafka.html b/content/docs/zh-CN/2.1.0-incubating/io-kafka.html
index d458072..38e728d 100644
--- a/content/docs/zh-CN/2.1.0-incubating/io-kafka.html
+++ b/content/docs/zh-CN/2.1.0-incubating/io-kafka.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
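
Note: the translated line carries the same meaning as the English source: the Kafka source connector pulls messages from Kafka topics and persists them to a Pulsar topic. A hedged source-config sketch, assuming the commonly documented options of the built-in Kafka source (bootstrapServers, groupId, topic); the field names and values are illustrative and are not part of this commit:

    {
      "tenant": "public",
      "namespace": "default",
      "name": "kafka-source",
      "topicName": "persistent://public/default/from-kafka",
      "configs": {
        "bootstrapServers": "kafka-broker:9092",
        "groupId": "pulsar-io-kafka",
        "topic": "my-kafka-topic"
      }
    }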
diff --git a/content/docs/zh-CN/2.1.0-incubating/io-kafka/index.html b/content/docs/zh-CN/2.1.0-incubating/io-kafka/index.html
index d458072..38e728d 100644
--- a/content/docs/zh-CN/2.1.0-incubating/io-kafka/index.html
+++ b/content/docs/zh-CN/2.1.0-incubating/io-kafka/index.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.1.1-incubating/io-kafka.html b/content/docs/zh-CN/2.1.1-incubating/io-kafka.html
index ee48218..8aac3ab 100644
--- a/content/docs/zh-CN/2.1.1-incubating/io-kafka.html
+++ b/content/docs/zh-CN/2.1.1-incubating/io-kafka.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.1.1-incubating/io-kafka/index.html b/content/docs/zh-CN/2.1.1-incubating/io-kafka/index.html
index ee48218..8aac3ab 100644
--- a/content/docs/zh-CN/2.1.1-incubating/io-kafka/index.html
+++ b/content/docs/zh-CN/2.1.1-incubating/io-kafka/index.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.2.0/io-kafka.html b/content/docs/zh-CN/2.2.0/io-kafka.html
index eed6fb3..4c131dd 100644
--- a/content/docs/zh-CN/2.2.0/io-kafka.html
+++ b/content/docs/zh-CN/2.2.0/io-kafka.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.2.0/io-kafka/index.html b/content/docs/zh-CN/2.2.0/io-kafka/index.html
index eed6fb3..4c131dd 100644
--- a/content/docs/zh-CN/2.2.0/io-kafka/index.html
+++ b/content/docs/zh-CN/2.2.0/io-kafka/index.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.2.1/io-kafka.html b/content/docs/zh-CN/2.2.1/io-kafka.html
index a09a385..914aee2 100644
--- a/content/docs/zh-CN/2.2.1/io-kafka.html
+++ b/content/docs/zh-CN/2.2.1/io-kafka.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.2.1/io-kafka/index.html b/content/docs/zh-CN/2.2.1/io-kafka/index.html
index a09a385..914aee2 100644
--- a/content/docs/zh-CN/2.2.1/io-kafka/index.html
+++ b/content/docs/zh-CN/2.2.1/io-kafka/index.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.3.0/io-kafka.html b/content/docs/zh-CN/2.3.0/io-kafka.html
index 1d94fd8..33c9b46 100644
--- a/content/docs/zh-CN/2.3.0/io-kafka.html
+++ b/content/docs/zh-CN/2.3.0/io-kafka.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/2.3.0/io-kafka/index.html b/content/docs/zh-CN/2.3.0/io-kafka/index.html
index 1d94fd8..33c9b46 100644
--- a/content/docs/zh-CN/2.3.0/io-kafka/index.html
+++ b/content/docs/zh-CN/2.3.0/io-kafka/index.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/io-kafka.html b/content/docs/zh-CN/io-kafka.html
index 83e71d1..72a8853 100644
--- a/content/docs/zh-CN/io-kafka.html
+++ b/content/docs/zh-CN/io-kafka.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/io-kafka/index.html b/content/docs/zh-CN/io-kafka/index.html
index 83e71d1..72a8853 100644
--- a/content/docs/zh-CN/io-kafka/index.html
+++ b/content/docs/zh-CN/io-kafka/index.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/next/io-kafka.html b/content/docs/zh-CN/next/io-kafka.html
index 9d8379d..7debb62 100644
--- a/content/docs/zh-CN/next/io-kafka.html
+++ b/content/docs/zh-CN/next/io-kafka.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/next/io-kafka/index.html b/content/docs/zh-CN/next/io-kafka/index.html
index 9d8379d..7debb62 100644
--- a/content/docs/zh-CN/next/io-kafka/index.html
+++ b/content/docs/zh-CN/next/io-kafka/index.html
@@ -21,7 +21,7 @@
           }
         });
       </script></span><li class="navSearchWrapper reactNavSearchWrapper"><input type="text" id="search_input_react" placeholder="Search" title="Search"/></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container mainContainer"><div class="wrapper"><div class="post"><header class="postHeader"><a class="edit-page-link button" href="https://crowdin.com/project/apache-pulsar/zh-CN" target="_blank" rel="noreferrer noopener">Tran [...]
-<p>The Kafka Source Connector is used to pull messages from Kafka topics and persist the messages to a Pulsar topic.</p>
+<p>Kafka Source Connector 用于从 Kafka 的主题中拉取消息并持久化到 Pulsar 主题中。</p>
 <h3><a class="anchor" aria-hidden="true" id="source-configuration-options"></a><a href="#source-configuration-options" aria-hidden="true" class="hash-link"><svg class="hash-link-icon" aria-hidden="true" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 [...]
 <table>
 <thead>
diff --git a/content/docs/zh-CN/next/reference-configuration.html b/content/docs/zh-CN/next/reference-configuration.html
index 4cfa5f0..a774036 100644
--- a/content/docs/zh-CN/next/reference-configuration.html
+++ b/content/docs/zh-CN/next/reference-configuration.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
diff --git a/content/docs/zh-CN/next/reference-configuration/index.html b/content/docs/zh-CN/next/reference-configuration/index.html
index 4cfa5f0..a774036 100644
--- a/content/docs/zh-CN/next/reference-configuration/index.html
+++ b/content/docs/zh-CN/next/reference-configuration/index.html
@@ -146,10 +146,10 @@
 <tr><td>useHostNameAsBookieID</td><td>Whether the bookie should use its hostname to register with the coordination service (e.g.: zookeeper service). When false, bookie will use its ipaddress for the registration.</td><td>false</td></tr>
 <tr><td>statsProviderClass</td><td></td><td>org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider</td></tr>
 <tr><td>prometheusStatsHttpPort</td><td></td><td>8000</td></tr>
-<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>512</td></tr>
-<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>256</td></tr>
+<tr><td>dbStorage_writeCacheMaxSizeMb</td><td>Size of Write Cache. Memory is allocated from JVM direct memory. Write cache is used to buffer entries before flushing into the entry log For good performance, it should be big enough to hold a sub</td><td>25% of direct memory</td></tr>
+<tr><td>dbStorage_readAheadCacheMaxSizeMb</td><td>Size of Read cache. Memory is allocated from JVM direct memory. This read cache is pre-filled doing read-ahead whenever a cache miss happens</td><td>25% of direct memory</td></tr>
 <tr><td>dbStorage_readAheadCacheBatchSize</td><td>How many entries to pre-fill in cache after a read cache miss</td><td>1000</td></tr>
-<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>268435456</td></tr>
+<tr><td>dbStorage_rocksDB_blockCacheSize</td><td>Size of RocksDB block-cache. For best performance, this cache should be big enough to hold a significant portion of the index database which can reach ~2GB in some cases</td><td>10% of direct memory</td></tr>
 <tr><td>dbStorage_rocksDB_writeBufferSizeMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_sstSizeInMB</td><td></td><td>64</td></tr>
 <tr><td>dbStorage_rocksDB_blockSize</td><td></td><td>65536</td></tr>
diff --git a/content/swagger/swagger.json b/content/swagger/swagger.json
index 003840a..0fd9b5e 100644
--- a/content/swagger/swagger.json
+++ b/content/swagger/swagger.json
@@ -7164,9 +7164,24 @@
           "type" : "number",
           "format" : "double"
         },
-        "cpu" : {
+        "bandwidthIn" : {
+          "$ref" : "#/definitions/ResourceUsage"
+        },
+        "bandwidthOut" : {
+          "$ref" : "#/definitions/ResourceUsage"
+        },
+        "overLoaded" : {
+          "type" : "boolean"
+        },
+        "loadReportType" : {
+          "type" : "string"
+        },
+        "memory" : {
           "$ref" : "#/definitions/ResourceUsage"
         },
+        "underLoaded" : {
+          "type" : "boolean"
+        },
         "directMemory" : {
           "$ref" : "#/definitions/ResourceUsage"
         },
@@ -7182,22 +7197,7 @@
           "type" : "number",
           "format" : "double"
         },
-        "underLoaded" : {
-          "type" : "boolean"
-        },
-        "overLoaded" : {
-          "type" : "boolean"
-        },
-        "loadReportType" : {
-          "type" : "string"
-        },
-        "bandwidthIn" : {
-          "$ref" : "#/definitions/ResourceUsage"
-        },
-        "bandwidthOut" : {
-          "$ref" : "#/definitions/ResourceUsage"
-        },
-        "memory" : {
+        "cpu" : {
           "$ref" : "#/definitions/ResourceUsage"
         }
       }
@@ -7358,10 +7358,10 @@
           "type" : "number",
           "format" : "double"
         },
-        "producerName" : {
+        "connectedSince" : {
           "type" : "string"
         },
-        "connectedSince" : {
+        "producerName" : {
           "type" : "string"
         },
         "clientVersion" : {
@@ -8075,10 +8075,10 @@
             "type" : "string"
           }
         },
-        "producerName" : {
+        "connectedSince" : {
           "type" : "string"
         },
-        "connectedSince" : {
+        "producerName" : {
           "type" : "string"
         },
         "clientVersion" : {
@@ -8183,11 +8183,11 @@
     "ResourceUnit" : {
       "type" : "object",
       "properties" : {
-        "resourceId" : {
-          "type" : "string"
-        },
         "availableResource" : {
           "$ref" : "#/definitions/ResourceDescription"
+        },
+        "resourceId" : {
+          "type" : "string"
         }
       }
     },
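
Note: the swagger.json hunks above only reorder fields in the generated definitions; the schema itself is unchanged. In the load-report definition, cpu, memory, directMemory, bandwidthIn and bandwidthOut are all ResourceUsage references, alongside the overLoaded/underLoaded flags and loadReportType. A hedged sketch of a fragment matching that shape, assuming ResourceUsage exposes usage and limit; all numbers are illustrative:

    {
      "loadReportType": "LoadReport",
      "overLoaded": false,
      "underLoaded": true,
      "cpu": { "usage": 1.5, "limit": 8.0 },
      "memory": { "usage": 2048.0, "limit": 8192.0 },
      "directMemory": { "usage": 512.0, "limit": 4096.0 },
      "bandwidthIn": { "usage": 10.0, "limit": 1000.0 },
      "bandwidthOut": { "usage": 12.5, "limit": 1000.0 }
    }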
diff --git a/content/swagger/swaggerfunctions.json b/content/swagger/swaggerfunctions.json
index 23f9bde..efa4894 100644
--- a/content/swagger/swaggerfunctions.json
+++ b/content/swagger/swaggerfunctions.json
@@ -348,7 +348,7 @@
           "200" : {
             "description" : "successful operation",
             "schema" : {
-              "type" : "string"
+              "$ref" : "#/definitions/FunctionState"
             }
           },
           "400" : {
@@ -1106,6 +1106,25 @@
         }
       }
     },
+    "FunctionState" : {
+      "type" : "object",
+      "properties" : {
+        "key" : {
+          "type" : "string"
+        },
+        "stringValue" : {
+          "type" : "string"
+        },
+        "numberValue" : {
+          "type" : "integer",
+          "format" : "int64"
+        },
+        "version" : {
+          "type" : "integer",
+          "format" : "int64"
+        }
+      }
+    },
     "FunctionStats" : {
       "type" : "object",
       "properties" : {
@@ -1166,6 +1185,17 @@
     "Message" : {
       "type" : "object",
       "properties" : {
+        "publishTime" : {
+          "type" : "integer",
+          "format" : "int64"
+        },
+        "eventTime" : {
+          "type" : "integer",
+          "format" : "int64"
+        },
+        "messageId" : {
+          "$ref" : "#/definitions/MessageId"
+        },
         "sequenceId" : {
           "type" : "integer",
           "format" : "int64"
@@ -1184,9 +1214,6 @@
           "type" : "integer",
           "format" : "int32"
         },
-        "messageId" : {
-          "$ref" : "#/definitions/MessageId"
-        },
         "schemaVersion" : {
           "type" : "array",
           "items" : {
@@ -1194,20 +1221,9 @@
             "format" : "byte"
           }
         },
-        "publishTime" : {
-          "type" : "integer",
-          "format" : "int64"
-        },
-        "eventTime" : {
-          "type" : "integer",
-          "format" : "int64"
-        },
         "topicName" : {
           "type" : "string"
         },
-        "producerName" : {
-          "type" : "string"
-        },
         "keyBytes" : {
           "type" : "array",
           "items" : {
@@ -1215,6 +1231,9 @@
             "format" : "byte"
           }
         },
+        "producerName" : {
+          "type" : "string"
+        },
         "data" : {
           "type" : "array",
           "items" : {