Posted to commits@druid.apache.org by fj...@apache.org on 2019/05/16 18:13:58 UTC

[incubator-druid] branch master updated: Add basic tuning guide, getting started page, updated clustering docs (#7629)

This is an automated email from the ASF dual-hosted git repository.

fjy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-druid.git


The following commit(s) were added to refs/heads/master by this push:
     new d667655  Add basic tuning guide, getting started page, updated clustering docs (#7629)
d667655 is described below

commit d667655871f2f9698b8438ebb39015c43d010a66
Author: Jonathan Wei <jo...@users.noreply.github.com>
AuthorDate: Thu May 16 11:13:48 2019 -0700

    Add basic tuning guide, getting started page, updated clustering docs (#7629)
    
    * Add basic tuning guide, getting started page, updated clustering docs
    
    * Add note about caching, fix tutorial paths
    
    * Adjust hadoop wording
    
    * Add license
    
    * Tweak
    
    * Shrink overlord heaps, fix tutorial urls
    
    * Tweak xlarge peon, update peon sizing
    
    * Update Data peon buffer size
    
    * Fix cluster start scripts
    
    * Add upper level _common to classpath
    
    * Fix cluster data/query confs
    
    * Address PR comments
    
    * Elaborate on connection pools
    
    * PR comments
    
    * Increase druid.broker.http.maxQueuedBytes
    
    * Add guidelines for broker backpressure
    
    * PR comments
---
 docs/_redirects.json                               |   4 +-
 docs/content/configuration/logging.md              |  33 ++
 docs/content/operations/basic-cluster-tuning.md    | 382 +++++++++++++++++++++
 docs/content/operations/getting-started.md         |  49 +++
 docs/content/operations/performance-faq.md         |  95 -----
 docs/content/operations/single-server.md           |   2 +
 docs/content/toc.md                                |  37 +-
 docs/content/tutorials/cluster.md                  | 378 ++++++++++++--------
 docs/content/tutorials/index.md                    |   6 +-
 docs/content/tutorials/tutorial-batch-hadoop.md    |  10 +-
 docs/content/tutorials/tutorial-batch.md           |   8 +-
 docs/content/tutorials/tutorial-compaction.md      |   6 +-
 docs/content/tutorials/tutorial-delete-data.md     |   2 +-
 docs/content/tutorials/tutorial-ingestion-spec.md  |   2 +-
 docs/content/tutorials/tutorial-retention.md       |   2 +-
 docs/content/tutorials/tutorial-rollup.md          |   2 +-
 docs/content/tutorials/tutorial-tranquility.md     |   8 +-
 docs/content/tutorials/tutorial-transform-spec.md  |   2 +-
 docs/content/tutorials/tutorial-update-data.md     |   8 +-
 examples/bin/run-druid                             |   2 +-
 .../conf/druid/cluster/data/historical/jvm.config  |   2 +-
 .../cluster/data/historical/runtime.properties     |   4 +-
 .../cluster/data/middleManager/runtime.properties  |   4 +-
 .../master}/coordinator-overlord/jvm.config        |   4 +-
 .../main.config                                    |   0
 .../runtime.properties                             |   9 +
 .../druid/cluster/master/coordinator/jvm.config    |   9 -
 .../conf/druid/cluster/master/overlord/jvm.config  |   8 -
 .../conf/druid/cluster/master/overlord/main.config |   1 -
 .../cluster/master/overlord/runtime.properties     |  26 --
 .../conf/druid/cluster/query/broker/jvm.config     |   6 +-
 .../druid/cluster/query/broker/runtime.properties  |   4 +-
 .../druid/single-server/large/broker/jvm.config    |   6 +-
 .../single-server/large/broker/runtime.properties  |   4 +-
 .../large/coordinator-overlord/jvm.config          |   4 +-
 .../single-server/large/historical/jvm.config      |   2 +-
 .../large/historical/runtime.properties            |   4 +-
 .../large/middleManager/runtime.properties         |   2 +-
 .../druid/single-server/medium/broker/jvm.config   |   2 +-
 .../single-server/medium/broker/runtime.properties |   2 +-
 .../medium/coordinator-overlord/jvm.config         |   4 +-
 .../medium/historical/runtime.properties           |   2 +-
 .../medium/middleManager/runtime.properties        |   2 +-
 .../middleManager/runtime.properties               |   2 +-
 .../small/coordinator-overlord/jvm.config          |   4 +-
 .../small/historical/runtime.properties            |   2 +-
 .../small/middleManager/runtime.properties         |   2 +-
 .../druid/single-server/xlarge/broker/jvm.config   |   4 +-
 .../single-server/xlarge/broker/runtime.properties |   2 +-
 .../xlarge/coordinator-overlord/jvm.config         |   4 +-
 .../xlarge/historical/runtime.properties           |   2 +-
 .../xlarge/middleManager/runtime.properties        |   4 +-
 examples/conf/supervise/cluster/data.conf          |  10 +-
 examples/conf/supervise/cluster/master-no-zk.conf  |   4 +-
 .../conf/supervise/cluster/master-with-zk.conf     |   4 +-
 examples/conf/supervise/cluster/query.conf         |   5 +-
 56 files changed, 811 insertions(+), 387 deletions(-)

diff --git a/docs/_redirects.json b/docs/_redirects.json
index 52680ff..1ab3a9c 100644
--- a/docs/_redirects.json
+++ b/docs/_redirects.json
@@ -164,5 +164,7 @@
   {"source": "development/community-extensions/graphite.html", "target": "../extensions-contrib/graphite.html"},
   {"source": "development/community-extensions/kafka-simple.html", "target": "../extensions-contrib/kafka-simple.html"},
   {"source": "development/community-extensions/rabbitmq.html", "target": "../extensions-contrib/rabbitmq.html"},
-  {"source": "development/extensions-core/namespaced-lookup.html", "target": "lookups-cached-global.html"}
+  {"source": "development/extensions-core/namespaced-lookup.html", "target": "lookups-cached-global.html"},
+  {"source": "operations/insert-segment-to-db.html", "target": "../index.html"},
+  {"source": "operations/performance-faq.html", "target": "../operations/basic-cluster-tuning.html"}
 ]
diff --git a/docs/content/configuration/logging.md b/docs/content/configuration/logging.md
index 1c89b7d..28c9052 100644
--- a/docs/content/configuration/logging.md
+++ b/docs/content/configuration/logging.md
@@ -53,3 +53,36 @@ An example log4j2.xml ships with Druid under config/_common/log4j2.xml, and a sa
   </Loggers>
 </Configuration>
 ```
+
+## My logs are really chatty, can I set them to write asynchronously?
+
+Yes, using a `log4j2.xml` similar to the following causes some of the more chatty classes to write asynchronously:
+
+```
+<?xml version="1.0" encoding="UTF-8" ?>
+<Configuration status="WARN">
+  <Appenders>
+    <Console name="Console" target="SYSTEM_OUT">
+      <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+    </Console>
+  </Appenders>
+  <Loggers>
+    <AsyncLogger name="org.apache.druid.curator.inventory.CuratorInventoryManager" level="debug" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <AsyncLogger name="org.apache.druid.client.BatchServerInventoryView" level="debug" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <!-- Make extra sure nobody adds logs in a bad way that can hurt performance -->
+    <AsyncLogger name="org.apache.druid.client.ServerInventoryView" level="debug" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <AsyncLogger name ="org.apache.druid.java.util.http.client.pool.ChannelResourceFactory" level="info" additivity="false">
+      <AppenderRef ref="Console"/>
+    </AsyncLogger>
+    <Root level="info">
+      <AppenderRef ref="Console"/>
+    </Root>
+  </Loggers>
+</Configuration>
+```
diff --git a/docs/content/operations/basic-cluster-tuning.md b/docs/content/operations/basic-cluster-tuning.md
new file mode 100644
index 0000000..aa09c07
--- /dev/null
+++ b/docs/content/operations/basic-cluster-tuning.md
@@ -0,0 +1,382 @@
+---
+layout: doc_page
+title: "Basic Cluster Tuning"
+---
+
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing,
+  ~ software distributed under the License is distributed on an
+  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  ~ KIND, either express or implied.  See the License for the
+  ~ specific language governing permissions and limitations
+  ~ under the License.
+  -->
+
+# Basic Cluster Tuning
+
+This document provides basic guidelines for configuration properties and cluster architecture considerations related to performance tuning of an Apache Druid (incubating) deployment. 
+
+Please note that this document provides general guidelines and rules-of-thumb: these are not absolute, universal rules for cluster tuning, and this introductory guide is not an exhaustive description of all Druid tuning properties, which are described in the [configuration reference](../configuration/index.html).
+
+If you have questions on tuning Druid for specific use cases, or questions on configuration properties not covered in this guide, please ask the [Druid user mailing list or other community channels](https://druid.apache.org/community/).
+
+## Process-specific guidelines
+
+### Historical
+
+#### Heap sizing
+
+The biggest contributions to heap usage on Historicals are:
+- Partial unmerged query results from segments.
+- The stored maps for [lookups](../querying/lookups.html).
+
+A general rule-of-thumb for sizing the Historical heap is `(0.5GB * number of CPU cores)`, with an upper limit of ~24GB.
+
+This rule-of-thumb scales using the number of CPU cores as a convenient proxy for hardware size and level of concurrency (note: this formula is not a hard rule for sizing Historical heaps).
+
+Having a heap that is too large can result in excessively long GC pauses; the ~24GB upper limit is imposed to avoid this.
+
+If caching is enabled on Historicals, the cache is stored on heap, sized by `druid.cache.sizeInBytes`.
+
+Running out of heap on the Historicals can indicate misconfiguration or usage patterns that are overloading the cluster.
+
+##### Lookups
+
+If you are using lookups, calculate the total size of the lookup maps being loaded. 
+
+Druid performs an atomic swap when updating lookup maps (both the old map and the new map will exist in heap during the swap), so the maximum potential heap usage from lookup maps will be (2 * total size of all loaded lookups).
+
+Be sure to add `(2 * total size of all loaded lookups)` to your heap size in addition to the `(0.5GB * number of CPU cores)` guideline.
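+
+For example, under these guidelines, a hypothetical Historical with 16 CPU cores and 2GB of loaded lookup maps would be sized as follows:
+
+```
+(0.5GB * 16 cores) + (2 * 2GB of lookups) = 8GB + 4GB = 12GB heap
+```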
+
+#### Processing Threads and Buffers
+
+Please see the [General Guidelines for Processing Threads and Buffers](#general-guidelines-for-processing-threads-and-buffers) section for an overview of processing thread/buffer configuration.
+
+On Historicals:
+- `druid.processing.numThreads` should generally be set to `(number of cores - 1)`: a smaller value can result in CPU underutilization, while going over the number of cores can result in unnecessary CPU contention.
+- `druid.processing.buffer.sizeBytes` can be set to 500MB.
+- `druid.processing.numMergeBuffers`: a 1:4 ratio of merge buffers to processing threads is a reasonable choice for general use.
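+
+For example, a hypothetical sketch of these settings for a 16-core Historical, following the guidelines above (the values are illustrative assumptions, not universal defaults):
+
+```
+# 16 cores - 1
+druid.processing.numThreads=15
+# 500MB per processing buffer
+druid.processing.buffer.sizeBytes=500000000
+# roughly a 1:4 ratio of merge buffers to processing threads
+druid.processing.numMergeBuffers=4
+```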
+
+#### Direct Memory Sizing
+
+The processing and merge buffers described above are direct memory buffers.
+
+When a Historical processes a query, it must open a set of segments for reading. This also requires some direct memory space, described in [segment decompression buffers](#segment-decompression).
+
+A formula for estimating direct memory usage follows:
+
+(`druid.processing.numThreads` + `druid.processing.numMergeBuffers` + 1) * `druid.processing.buffer.sizeBytes`
+
+The `+ 1` factor is a fuzzy estimate meant to account for the segment decompression buffers.
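+
+Continuing the hypothetical 16-core Historical sketched above (15 processing threads, 4 merge buffers, 500MB buffers), the estimate works out to:
+
+```
+(15 + 4 + 1) * 500MB = 10GB of direct memory
+```
+
+You can ensure at least this much direct memory is available by passing `-XX:MaxDirectMemorySize=10g` in the Historical's jvm.config.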
+
+#### Connection Pool Sizing
+
+Please see the [General Connection Pool Guidelines](#general-connection-pool-guidelines) section for an overview of connection pool configuration.
+
+For Historicals, `druid.server.http.numThreads` should be set to a value slightly higher than the sum of `druid.broker.http.numConnections` across all the Brokers in the cluster.
+
+Tuning the cluster so that each Historical can accept 50 queries and 10 non-queries is a reasonable starting point.
+
+#### Segment Cache Size
+
+`druid.server.maxSize` controls the total size of segment data that can be assigned by the Coordinator to a Historical.
+
+`druid.segmentCache.locations` specifies locations where segment data can be stored on the Historical. The sum of available disk space across these locations should equal `druid.server.maxSize`.
+
+Segments are memory-mapped by Historical processes using any available free system memory (i.e., memory not used by the Historical JVM's heap and direct memory buffers, or by other processes on the system). Segments that are not currently in memory will be paged in from disk when queried.
+
+Therefore, `druid.server.maxSize` should be set such that a Historical is not allocated an excessive amount of segment data. As the value of (`free system memory` / `druid.server.maxSize`) increases, a greater proportion of segments can be kept in memory, allowing for better query performance.
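+
+For example, a hypothetical Historical with a single 300GB cache location might be configured as follows (the path and sizes are illustrative placeholders):
+
+```
+druid.server.maxSize=300000000000
+druid.segmentCache.locations=[{"path":"/var/druid/segment-cache","maxSize":300000000000}]
+```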
+
+#### Number of Historicals
+
+The number of Historicals needed in a cluster depends on how much data the cluster has. For good performance, you will want enough Historicals such that each Historical has a good (`free system memory` / `druid.server.maxSize`) ratio, as described in the segment cache size section above.
+
+Having a smaller number of big servers is generally better than having a large number of small servers, as long as you have enough fault tolerance for your use case.
+
+#### SSD storage
+
+We recommend using SSDs for storage on the Historicals, as they handle segment data stored on disk.
+
+#### Total Memory Usage
+
+To estimate total memory usage of the Historical under these guidelines:
+
+- Heap: `(0.5GB * number of CPU cores) + (2 * total size of lookup maps) + druid.cache.sizeInBytes`
+- Direct Memory: `(druid.processing.numThreads + druid.processing.numMergeBuffers + 1) * druid.processing.buffer.sizeBytes`
+
+The Historical will use any available free system memory (i.e., memory not used by the Historical JVM's heap and direct memory buffers, or by other processes on the system) for memory-mapping of segments on disk. For better query performance, you will want to ensure a good (`free system memory` / `druid.server.maxSize`) ratio so that a greater proportion of segments can be kept in memory.
+
+### Broker
+
+#### Heap Sizing
+
+The biggest contributions to heap usage on Brokers are:
+- Partial unmerged query results from Historicals and Tasks.
+- The segment timeline: this consists of location information (which Historical/Task is serving a segment) for all currently [available](../ingestion/index.html#segment-states) segments.
+- Cached segment metadata: this consists of metadata, such as per-segment schemas, for all currently available segments.
+
+The Broker heap requirements scale based on the number of segments in the cluster, and the total data size of the segments. 
+
+The heap size will vary based on data size and usage patterns, but 4GB to 8GB is a good starting point for a small or medium cluster (~15 servers or fewer). For a rough estimate of memory requirements on the high end, very large clusters on the order of ~100 nodes may need Broker heaps of 30GB-60GB.
+
+If caching is enabled on the Broker, the cache is stored on heap, sized by `druid.cache.sizeInBytes`.
+
+#### Direct Memory Sizing
+
+On the Broker, the amount of direct memory needed depends on how many merge buffers (used for merging GroupBys) are configured. The Broker does not generally need processing threads or processing buffers, as query results are merged on-heap in the HTTP connection threads instead.
+
+- `druid.processing.buffer.sizeBytes` can be set to 500MB.
+- `druid.processing.numThreads`: set this to 1 (the minimum allowed).
+- `druid.processing.numMergeBuffers`: set this to the same value as on Historicals, or a bit higher.
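+
+A hypothetical sketch of these Broker settings, assuming Historicals configured with 4 merge buffers as in the earlier example:
+
+```
+druid.processing.buffer.sizeBytes=500000000
+druid.processing.numThreads=1
+druid.processing.numMergeBuffers=6
+```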
+
+##### Note on the deprecated `chunkPeriod`
+
+There is one exception to the Broker not needing processing threads and processing buffers:
+
+If the deprecated `chunkPeriod` property in the [query context](../querying/query-context.html) is set, GroupBy V1 queries will use processing threads and processing buffers on the Broker.
+
+Both `chunkPeriod` and GroupBy V1 are deprecated (use GroupBy V2 instead) and will be removed in the future; we do not recommend using them. The presence of the deprecated `chunkPeriod` feature is why a minimum of 1 processing thread must be configured, even if it's unused.
+
+#### Connection Pool Sizing
+
+Please see the [General Connection Pool Guidelines](#general-connection-pool-guidelines) section for an overview of connection pool configuration.
+
+On the Brokers, please ensure that the sum of `druid.broker.http.numConnections` across all the Brokers is slightly lower than the value of `druid.server.http.numThreads` on your Historicals and Tasks.
+
+`druid.server.http.numThreads` on the Broker should be set to a value slightly higher than `druid.broker.http.numConnections` on the same Broker.
+
+Tuning the cluster so that each Historical can accept 50 queries and 10 non-queries, adjusting the Brokers accordingly, is a reasonable starting point.
+
+#### Broker Backpressure
+
+When retrieving query results from Historical processes or Tasks, the Broker can optionally specify a maximum buffer size for queued, unread data, and exert backpressure on the channel to the Historical or Task when that limit is reached (causing writes to the channel to block on the Historical/Task side until the Broker is able to drain some data from the channel).
+
+This buffer size is controlled by the `druid.broker.http.maxQueuedBytes` setting.
+
+The limit is divided across the number of Historicals/Tasks that a query would hit: suppose `druid.broker.http.maxQueuedBytes` is set to 5MB and the Broker receives a query that needs to be fanned out to 2 Historicals. Each per-Historical channel would get a 2.5MB buffer in this case.
+
+You can generally set this to a value of approximately `2MB * number of Historicals`. As your cluster scales up with more Historicals and Tasks, consider increasing this buffer size and increasing the Broker heap accordingly.
+
+- If the buffer is too small, this can lead to inefficient queries due to the buffer filling up rapidly and stalling the channel.
+- If the buffer is too large, this puts more memory pressure on the Broker due to more queued result data in the HTTP channels.
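+
+For example, following the `2MB * number of Historicals` guideline above, a hypothetical cluster with 10 Historicals would use:
+
+```
+druid.broker.http.maxQueuedBytes=20000000
+```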
+
+#### Number of Brokers
+
+A 1:15 ratio of Brokers to Historicals is a reasonable starting point (this is not a hard rule).
+
+If you need Broker HA, you can deploy 2 initially and then use the 1:15 ratio guideline for additional Brokers.
+
+#### Total Memory Usage
+
+To estimate total memory usage of the Broker under these guidelines:
+
+- Heap: allocated heap size
+- Direct Memory: `(druid.processing.numThreads + druid.processing.numMergeBuffers + 1) * druid.processing.buffer.sizeBytes`
+
+### MiddleManager
+
+The MiddleManager is a lightweight task controller/manager that launches Task processes, which perform ingestion work.
+
+#### MiddleManager Heap Sizing
+
+The MiddleManager itself does not require many resources; a heap of ~128MB is generally sufficient.
+
+#### SSD storage
+
+We recommend using SSDs for storage on the MiddleManagers, as the Tasks launched by MiddleManagers handle segment data stored on disk.
+
+#### Task Count
+
+The number of tasks a MiddleManager can launch is controlled by the `druid.worker.capacity` setting. 
+
+The number of workers needed in your cluster depends on how many concurrent ingestion tasks you need to run for your use cases. The number of workers that can be launched on a given machine depends on the size of resources allocated per worker and available system resources.
+
+You can allocate more MiddleManager machines to your cluster to add task capacity.
+
+#### Task Configurations
+
+The following section describes configuration for the Tasks launched by the MiddleManager. Tasks can be queried and they perform ingestion workloads, so they require more resources than the MiddleManager itself.
+
+##### Task Heap Sizing
+
+A 1GB heap is usually enough for Tasks.
+
+###### Lookups
+
+If you are using lookups, calculate the total size of the lookup maps being loaded. 
+
+Druid performs an atomic swap when updating lookup maps (both the old map and the new map will exist in heap during the swap), so the maximum potential heap usage from lookup maps will be (2 * total size of all loaded lookups).
+
+Be sure to add `(2 * total size of all loaded lookups)` to your Task heap size if you are using lookups.
+
+##### Task processing threads and buffers
+
+For Tasks, 1 or 2 processing threads are often enough, as the Tasks tend to hold much less queryable data than Historical processes.
+
+- `druid.indexer.fork.property.druid.processing.numThreads`: set this to 1 or 2
+- `druid.indexer.fork.property.druid.processing.numMergeBuffers`: set this to 2
+- `druid.indexer.fork.property.druid.processing.buffer.sizeBytes`: can be set to 100MB
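+
+Putting these together, a sketch of the Task tuning properties under the guidelines above (the values are illustrative):
+
+```
+druid.indexer.fork.property.druid.processing.numThreads=2
+druid.indexer.fork.property.druid.processing.numMergeBuffers=2
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+```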
+
+##### Direct Memory Sizing
+
+The processing and merge buffers described above are direct memory buffers.
+
+When a Task processes a query, it must open a set of segments for reading. This also requires some direct memory space, described in [segment decompression buffers](#segment-decompression).
+
+An ingestion Task also needs to merge partial ingestion results, which requires direct memory space, described in [segment merging](#segment-merging).
+
+A formula for estimating direct memory usage follows:
+
+(`druid.processing.numThreads` + `druid.processing.numMergeBuffers` + 1) * `druid.processing.buffer.sizeBytes`
+
+The `+ 1` factor is a fuzzy estimate meant to account for the segment decompression buffers and dictionary merging buffers.
+
+##### Connection Pool Sizing
+
+Please see the [General Connection Pool Guidelines](#general-connection-pool-guidelines) section for an overview of connection pool configuration.
+
+For Tasks, `druid.server.http.numThreads` should be set to a value slightly higher than the sum of `druid.broker.http.numConnections` across all the Brokers in the cluster.
+
+Tuning the cluster so that each Task can accept 50 queries and 10 non-queries is a reasonable starting point.
+
+#### Total Memory Usage
+
+To estimate total memory usage of a Task under these guidelines:
+
+- Heap: `1GB + (2 * total size of lookup maps)`
+- Direct Memory: `(druid.processing.numThreads + druid.processing.numMergeBuffers + 1) * druid.processing.buffer.sizeBytes`
+
+The total memory usage of the MiddleManager + Tasks:
+
+`MM heap size + druid.worker.capacity * (single task memory usage)`
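+
+For example, a hypothetical MiddleManager with `druid.worker.capacity=4`, 1GB Task heaps, no lookups, and the Task processing settings sketched above ((2 + 2 + 1) * 100MB = 500MB of direct memory per Task) would need roughly:
+
+```
+128MB (MM heap) + 4 * (1GB heap + 500MB direct) = ~6.1GB
+```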
+
+##### Configuration Guidelines for Specific Ingestion Types
+
+###### Kafka/Kinesis Ingestion
+
+If you use the [Kafka Indexing Service](../development/extensions-core/kafka-ingestion.html) or [Kinesis Indexing Service](../development/extensions-core/kinesis-ingestion.html), the number of tasks required will depend on the number of partitions and your taskCount/replica settings.
+
+On top of those requirements, allocating more task slots in your cluster is a good idea, so that you have free task slots available for [Compaction Tasks](../ingestion/compaction.html).
+
+###### Hadoop Ingestion
+
+If you are only using [Hadoop Batch Ingestion](../ingestion/hadoop.html) with no other ingestion types, you can lower the amount of resources allocated per Task. Batch ingestion tasks do not need to answer queries, and the bulk of the ingestion workload will be executed on the Hadoop cluster, so the Tasks do not require many resources.
+
+###### Parallel Native Ingestion
+
+If you are using [Parallel Native Ingestion](../ingestion/native_tasks.html), allocating more available task slots is a good idea and will allow greater ingestion concurrency.
+
+### Coordinator
+
+The main performance-related setting on the Coordinator is the heap size.
+
+The heap requirements of the Coordinator scale with the number of servers, segments, and tasks in the cluster.
+
+You can set the Coordinator heap to the same size as your Broker heap, or slightly smaller: both services have to process cluster-wide state and answer API requests about this state.
+
+### Overlord
+
+The main performance-related setting on the Overlord is the heap size.
+
+The heap requirements of the Overlord scale primarily with the number of running Tasks.
+
+The Overlord tends to require fewer resources than the Coordinator or Broker. You can generally set the Overlord heap to a value that's 25-50% of your Coordinator heap.
+
+### Router
+
+The Router has light resource requirements, as it proxies requests to Brokers without performing much computational work itself.
+
+You can assign it a 256MB heap as a starting point, growing it if needed.
+
+## General Guidelines for Processing Threads and Buffers
+
+### Processing Threads
+
+The `druid.processing.numThreads` configuration controls the size of the processing thread pool used for computing query results. The size of this pool limits how many queries can be concurrently processed.
+
+### Processing Buffers
+
+`druid.processing.buffer.sizeBytes` is a closely related property that controls the size of the off-heap buffers allocated to the processing threads. 
+
+One buffer is allocated for each processing thread. A size between 500MB and 1GB is a reasonable choice for general use.
+
+The TopN and GroupBy queries use these buffers to store intermediate computed results. As the buffer size increases, more data can be processed in a single pass.
+
+### GroupBy Merging Buffers
+
+If you plan to issue GroupBy V2 queries, `druid.processing.numMergeBuffers` is an important configuration property. 
+
+GroupBy V2 queries use an additional pool of off-heap buffers for merging query results. These buffers have the same size as the processing buffers described above, set by the `druid.processing.buffer.sizeBytes` property.
+
+Non-nested GroupBy V2 queries require 1 merge buffer per query, while a nested GroupBy V2 query requires 2 merge buffers (regardless of the depth of nesting). 
+
+The number of merge buffers determines the number of GroupBy V2 queries that can be processed concurrently.
+
+## General Connection Pool Guidelines
+
+Each Druid process has a configuration property for the number of HTTP connection handling threads, `druid.server.http.numThreads`.
+
+The number of HTTP server threads limits how many concurrent HTTP API requests a given process can handle. 
+
+### Sizing the connection pool for queries
+
+The Broker has a setting `druid.broker.http.numConnections` that controls how many outgoing connections it can make to a given Historical or Task process.
+
+These connections are used to send queries to the Historicals or Tasks, with one connection per query; the value of `druid.broker.http.numConnections` is effectively a limit on the number of concurrent queries that a given Broker can process.
+
+Suppose we have a cluster with 3 Brokers and `druid.broker.http.numConnections` is set to 10.
+
+This means that each Broker in the cluster will open up to 10 connections to each individual Historical or Task (for a total of 30 incoming query connections per Historical/Task).
+
+On the Historical/Task side, this means that `druid.server.http.numThreads` must be set to a value at least as high as the sum of `druid.broker.http.numConnections` across all the Brokers in the cluster. 
+
+In practice, you will want to allocate additional server threads for non-query API requests such as status checks; adding 10 threads for those is a good general guideline. Using the example with 3 Brokers in the cluster and `druid.broker.http.numConnections` set to 10, a value of 40 would be appropriate for `druid.server.http.numThreads` on Historicals and Tasks.
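+
+In configuration terms, that 3-Broker example would look like the following (the values follow the example above, not universal defaults):
+
+```
+# On each Broker
+druid.broker.http.numConnections=10
+
+# On each Historical/Task: (3 Brokers * 10 connections) + 10 threads for non-query requests
+druid.server.http.numThreads=40
+```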
+
+As a starting point, allowing for 50 concurrent queries (requests that read segment data from datasources) + 10 non-query requests (other requests like status checks) on Historicals and Tasks is reasonable (i.e., set `druid.server.http.numThreads` to 60 there), while sizing `druid.broker.http.numConnections` based on the number of Brokers in the cluster to fit within the 50 query connection limit per Historical/Task.
+
+- If the connection pool across Brokers and Historicals/Tasks is too small, the cluster will be underutilized as there are too few concurrent query slots.
+- If the connection pool is too large, you may get out-of-memory errors due to excessive concurrent load, and increased resource contention.
+- The connection pool sizing matters most when you require QoS-type guarantees and use query priorities; otherwise, these settings can be more loosely configured.
+- If your cluster usage patterns are heavily biased towards a high number of small concurrent queries (where each query takes less than ~15ms), enlarging the connection pool can be a good idea.
+- The 50/10 general guideline here is a rough starting point, since different queries impose different amounts of load on the system. To size the connection pool more exactly for your cluster, you would need to know the execution times for your queries and ensure that the rate of incoming queries does not exceed your "drain" rate.
+
+## Garbage Collection Settings
+
+We recommend using the G1GC garbage collector:
+
+`-XX:+UseG1GC`
+
+Enabling process termination on out-of-memory errors is useful as well, since the process generally will not recover from such a state, and it's better to restart the process:
+
+`-XX:+ExitOnOutOfMemoryError`
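+
+A minimal jvm.config sketch combining these flags (the heap and direct memory sizes are placeholders to be replaced with your own sizing from the guidelines above):
+
+```
+-server
+-Xms12g
+-Xmx12g
+-XX:MaxDirectMemorySize=10g
+-XX:+UseG1GC
+-XX:+ExitOnOutOfMemoryError
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+```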
+
+## Per-Segment Direct Memory Buffers
+
+### Segment Decompression
+
+When opening a segment for reading during segment merging or query processing, Druid allocates a 64KB off-heap decompression buffer for each column being read.
+
+Thus, there is additional direct memory overhead of (64KB * number of columns read per segment * number of segments read) when reading segments.
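+
+For example, a query that reads 20 columns from each of 100 segments would need roughly:
+
+```
+64KB * 20 columns * 100 segments = 128,000KB (~125MB) of decompression buffers
+```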
+
+### Segment Merging
+
+In addition to the segment decompression overhead described above, when a set of segments is merged during ingestion, a direct buffer is allocated for every String-typed column, for every segment in the set to be merged.
+
+The size of each buffer is equal to the cardinality of the String column within its segment, times 4 bytes (the buffers store integers).
+ 
+For example, if two segments are being merged, the first segment having a single String column with cardinality 1000, and the second segment having a String column with cardinality 500, the merge step would allocate (1000 + 500) * 4 = 6000 bytes of direct memory. 
+ 
+These buffers are used for merging the value dictionaries of the String column across segments. These "dictionary merging buffers" are independent of the "merge buffers" configured by `druid.processing.numMergeBuffers`.
+
+
+
diff --git a/docs/content/operations/getting-started.md b/docs/content/operations/getting-started.md
new file mode 100644
index 0000000..cef094f
--- /dev/null
+++ b/docs/content/operations/getting-started.md
@@ -0,0 +1,49 @@
+---
+layout: doc_page
+title: "Getting Started with Apache Druid (incubating)"
+---
+
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing,
+  ~ software distributed under the License is distributed on an
+  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  ~ KIND, either express or implied.  See the License for the
+  ~ specific language governing permissions and limitations
+  ~ under the License.
+  -->
+
+# Getting Started with Apache Druid (incubating)
+
+## Overview
+
+If you are new to Druid, we recommend reading the [Design Overview](../design/index.html) and the [Ingestion Overview](../ingestion/index.html) first for a basic understanding of Druid.
+
+## Single-server Quickstart and Tutorials
+
+To get started with running Druid, the simplest and quickest way is to try the [single-server quickstart and tutorials](../tutorials/index.html).
+
+## Deploying a Druid cluster
+
+If you wish to jump straight to deploying Druid as a cluster, or if you have an existing single-server deployment that you wish to migrate to a clustered deployment, please see the [Clustered Deployment Guide](../tutorials/cluster.html).
+
+## Operating Druid
+
+The [Configuration Reference](../configuration/index.html) describes all of Druid's configuration properties.
+
+The [API Reference](../operations/api-reference.html) describes the APIs available on each Druid process.
+
+The [Basic Cluster Tuning Guide](../operations/basic-cluster-tuning.html) is an introductory guide for tuning your Druid cluster.
+
+## Need help with Druid?
+
+If you have questions about using Druid, please reach out to the [Druid user mailing list or other community channels](https://druid.apache.org/community/)!
\ No newline at end of file
diff --git a/docs/content/operations/performance-faq.md b/docs/content/operations/performance-faq.md
deleted file mode 100644
index c5a48a0..0000000
--- a/docs/content/operations/performance-faq.md
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: doc_page
-title: "Performance FAQ"
----
-
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~   http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing,
-  ~ software distributed under the License is distributed on an
-  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  ~ KIND, either express or implied.  See the License for the
-  ~ specific language governing permissions and limitations
-  ~ under the License.
-  -->
-
-# Performance FAQ
-
-## I can't match your benchmarked results
-
-Improper configuration is by far the largest problem we see people trying to deploy Apache Druid (incubating). The example configurations listed in the tutorials are designed for a small volume of data where all processes are on a single machine. The configs are extremely poor for actual production use.
-
-## What should I set my JVM heap?
-
-The size of the JVM heap really depends on the type of Druid process you are running. Below are a few considerations.
-
-[Broker processes](../design/broker.html) uses the JVM heap mainly to merge results from Historicals and real-times. Brokers also use off-heap memory and processing threads for groupBy queries. We recommend 20G-30G of heap here.
-
-[Historical processes](../design/historical.html) use off-heap memory to store intermediate results, and by default, all segments are memory mapped before they can be queried. Typically, the more memory is available on a Historical process, the more segments can be served without the possibility of data being paged on to disk. On Historicals, the JVM heap is used for [GroupBy queries](../querying/groupbyquery.html), some data structures used for intermediate computation, and general proc [...]
-
-We recommend 250mb * (processing.numThreads) for the heap.
-
-[Coordinator processes](../design/coordinator.html) do not require off-heap memory and the heap is used for loading information about all segments to determine what segments need to be loaded, dropped, moved, or replicated.
-
-## How much direct memory does Druid use?
-Any Druid process that handles queries (Brokers, Peons, and Historicals) uses two kinds of direct memory buffers with configurable size: processing buffers and merge buffers.
-
-Each processing thread is allocated one processing buffer. Additionally, there is a shared pool of merge buffers (only used for GroupBy V2 queries currently).
-
-Other sources of direct memory usage include:
-- When a column is loaded for reading, a 64KB direct buffer is allocated for decompression.
-- When a set of segments are merged during ingestion, a direct buffer is allocated for every String typed column, for every segment in the set to be merged. The size of these buffers are equal to the cardinality of the String column within its segment, times 4 bytes (the buffers store integers). For example, if two segments are being merged, the first segment having a single String column with cardinality 1000, and the second segment having a String column with cardinality 500, the merge [...]
-
-A useful formula for estimating direct memory usage follows:
-
-`druid.processing.buffer.sizeBytes * (druid.processing.numMergeBuffers + druid.processing.numThreads + 1)`
-
-The `+1` is a fuzzy parameter meant to account for the decompression and dictionary merging buffers and may need to be adjusted based on the characteristics of the data being ingested/queried.
-Operators can ensure at least this amount of direct memory is available by providing `-XX:MaxDirectMemorySize=<VALUE>` at the command line.
-
-## What is the intermediate computation buffer?
-The intermediate computation buffer specifies a buffer size for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can require more passes depending on the query that is being executed. The default size is 1073741824 bytes (1GB).
-
-## What is server maxSize?
-Server maxSize sets the maximum cumulative segment size (in bytes) that a process can hold. Changing this parameter will affect performance by controlling the memory/disk ratio on a process. Setting this parameter to a value greater than the total memory capacity on a process and may cause disk paging to occur. This paging time introduces a query latency delay.
-
-## My logs are really chatty, can I set them to asynchronously write?
-Yes, using a `log4j2.xml` similar to the following causes some of the more chatty classes to write asynchronously:
-
-```
-<?xml version="1.0" encoding="UTF-8" ?>
-<Configuration status="WARN">
-  <Appenders>
-    <Console name="Console" target="SYSTEM_OUT">
-      <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
-    </Console>
-  </Appenders>
-  <Loggers>
-    <AsyncLogger name="org.apache.druid.curator.inventory.CuratorInventoryManager" level="debug" additivity="false">
-      <AppenderRef ref="Console"/>
-    </AsyncLogger>
-    <AsyncLogger name="org.apache.druid.client.BatchServerInventoryView" level="debug" additivity="false">
-      <AppenderRef ref="Console"/>
-    </AsyncLogger>
-    <!-- Make extra sure nobody adds logs in a bad way that can hurt performance -->
-    <AsyncLogger name="org.apache.druid.client.ServerInventoryView" level="debug" additivity="false">
-      <AppenderRef ref="Console"/>
-    </AsyncLogger>
-    <AsyncLogger name ="org.apache.druid.java.util.http.client.pool.ChannelResourceFactory" level="info" additivity="false">
-      <AppenderRef ref="Console"/>
-    </AsyncLogger>
-    <Root level="info">
-      <AppenderRef ref="Console"/>
-    </Root>
-  </Loggers>
-</Configuration>
-```
diff --git a/docs/content/operations/single-server.md b/docs/content/operations/single-server.md
index 62ae8f9..54a6002 100644
--- a/docs/content/operations/single-server.md
+++ b/docs/content/operations/single-server.md
@@ -40,6 +40,8 @@ The startup scripts for these example configurations run a single ZK instance al
 
 The example configurations run the Druid Coordinator and Overlord together in a single process using the optional configuration `druid.coordinator.asOverlord.enabled=true`, described in the [Coordinator configuration documentation](../configuration/index.html#coordinator-operation).
 
+While example configurations are provided for very large single machines, at higher scales we recommend running Druid in a [clustered deployment](../tutorials/cluster.html) for fault tolerance and reduced resource contention.
+
 ## Single Server Reference Configurations
 
 Micro-Quickstart: 4 CPU, 16GB RAM
diff --git a/docs/content/toc.md b/docs/content/toc.md
index 3e3ca98..7b179e7 100644
--- a/docs/content/toc.md
+++ b/docs/content/toc.md
@@ -30,21 +30,22 @@ layout: toc
     * [Query processing](/docs/VERSION/design/index.html#query-processing)
     * [External dependencies](/docs/VERSION/design/index.html#external-dependencies)
     * [Ingestion overview](/docs/VERSION/ingestion/index.html)
-  * [Quickstart](/docs/VERSION/tutorials/index.html)
-    * [Tutorial: Loading a file](/docs/VERSION/tutorials/tutorial-batch.html)
-    * [Tutorial: Loading stream data from Apache Kafka](/docs/VERSION/tutorials/tutorial-kafka.html)
-    * [Tutorial: Loading a file using Apache Hadoop](/docs/VERSION/tutorials/tutorial-batch-hadoop.html)
-    * [Tutorial: Loading stream data using HTTP push](/docs/VERSION/tutorials/tutorial-tranquility.html)
-    * [Tutorial: Querying data](/docs/VERSION/tutorials/tutorial-query.html)
-  * Further tutorials
-    * [Tutorial: Rollup](/docs/VERSION/tutorials/tutorial-rollup.html)
-    * [Tutorial: Configuring retention](/docs/VERSION/tutorials/tutorial-retention.html)
-    * [Tutorial: Updating existing data](/docs/VERSION/tutorials/tutorial-update-data.html)
-    * [Tutorial: Compacting segments](/docs/VERSION/tutorials/tutorial-compaction.html)
-    * [Tutorial: Deleting data](/docs/VERSION/tutorials/tutorial-delete-data.html)
-    * [Tutorial: Writing your own ingestion specs](/docs/VERSION/tutorials/tutorial-ingestion-spec.html)
-    * [Tutorial: Transforming input data](/docs/VERSION/tutorials/tutorial-transform-spec.html)
-  * [Clustering](/docs/VERSION/tutorials/cluster.html)
+  * [Getting Started](/docs/VERSION/operations/getting-started.html)
+    * [Single-server Quickstart](/docs/VERSION/tutorials/index.html)
+      * [Tutorial: Loading a file](/docs/VERSION/tutorials/tutorial-batch.html)
+      * [Tutorial: Loading stream data from Apache Kafka](/docs/VERSION/tutorials/tutorial-kafka.html)
+      * [Tutorial: Loading a file using Apache Hadoop](/docs/VERSION/tutorials/tutorial-batch-hadoop.html)
+      * [Tutorial: Loading stream data using HTTP push](/docs/VERSION/tutorials/tutorial-tranquility.html)
+      * [Tutorial: Querying data](/docs/VERSION/tutorials/tutorial-query.html)
+      * Further tutorials
+        * [Tutorial: Rollup](/docs/VERSION/tutorials/tutorial-rollup.html)
+        * [Tutorial: Configuring retention](/docs/VERSION/tutorials/tutorial-retention.html)
+        * [Tutorial: Updating existing data](/docs/VERSION/tutorials/tutorial-update-data.html)
+        * [Tutorial: Compacting segments](/docs/VERSION/tutorials/tutorial-compaction.html)
+        * [Tutorial: Deleting data](/docs/VERSION/tutorials/tutorial-delete-data.html)
+        * [Tutorial: Writing your own ingestion specs](/docs/VERSION/tutorials/tutorial-ingestion-spec.html)
+        * [Tutorial: Transforming input data](/docs/VERSION/tutorials/tutorial-transform-spec.html)
+    * [Clustering](/docs/VERSION/tutorials/cluster.html)
 
 ## Data Ingestion
   * [Ingestion overview](/docs/VERSION/ingestion/index.html)
@@ -131,7 +132,6 @@ layout: toc
   * [Updating the Cluster](/docs/VERSION/operations/rolling-updates.html)
   * [Different Hadoop Versions](/docs/VERSION/operations/other-hadoop.html)
   * [High Availability](/docs/VERSION/operations/high-availability.html)
-  * [Performance FAQ](/docs/VERSION/operations/performance-faq.html)
   * [Management UIs](/docs/VERSION/operations/management-uis.html)
   * [Dump Segment Tool](/docs/VERSION/operations/dump-segment.html)
   * [Insert Segment Tool](/docs/VERSION/operations/insert-segment-to-db.html)
@@ -140,6 +140,9 @@ layout: toc
   * [TLS Support](/docs/VERSION/operations/tls-support.html)
   * [Password Provider](/docs/VERSION/operations/password-provider.html)
   * [HTTP Compression](/docs/VERSION/operations/http-compression.html)
+  * [Basic Cluster Tuning](/docs/VERSION/operations/basic-cluster-tuning.html)
+  * [Single-server Deployment Examples](/docs/VERSION/operations/single-server.html)
+  * [Clustered Deployment Example](/docs/VERSION/operations/example-cluster.html)
 
 ## Configuration
   * [Configuration Reference](/docs/VERSION/configuration/index.html)
@@ -154,7 +157,7 @@ layout: toc
   * [Caching](/docs/VERSION/configuration/index.html#cache-configuration)
   * [General Query Configuration](/docs/VERSION/configuration/index.html#general-query-configuration)
   * [Configuring Logging](/docs/VERSION/configuration/logging.html)
-  
+
 ## Development
   * [Overview](/docs/VERSION/development/overview.html)
   * [Libraries](/libraries.html)
diff --git a/docs/content/tutorials/cluster.md b/docs/content/tutorials/cluster.md
index d729b63..a202d25 100644
--- a/docs/content/tutorials/cluster.md
+++ b/docs/content/tutorials/cluster.md
@@ -1,6 +1,6 @@
 ---
 layout: doc_page
-title: "Clustering"
+title: "Setting up a Clustered Deployment"
 ---
 
 <!--
@@ -22,7 +22,7 @@ title: "Clustering"
   ~ under the License.
   -->
 
-# Clustering
+# Setting up a Clustered Deployment
 
 Apache Druid (incubating) is designed to be deployed as a scalable, fault-tolerant cluster.
 
@@ -30,48 +30,101 @@ In this document, we'll set up a simple cluster and discuss how it can be furthe
 your needs. 
 
 This simple cluster will feature:
- - A single Master server to host the Coordinator and Overlord processes
- - Scalable, fault-tolerant Data servers running Historical and MiddleManager processes
- - Query servers, hosting Druid Broker processes
+ - A Master server to host the Coordinator and Overlord processes
+ - Two scalable, fault-tolerant Data servers running Historical and MiddleManager processes
+ - A query server, hosting the Druid Broker and Router processes
 
-In production, we recommend deploying multiple Master servers with Coordinator and Overlord processes in a fault-tolerant configuration as well.
+In production, we recommend deploying multiple Master servers and multiple Query servers in a fault-tolerant configuration based on your specific needs, but you can get started quickly with one Master and one Query server and add more servers later.
 
 ## Select hardware
 
-### Master Server
+### Fresh Deployment
 
-The Coordinator and Overlord processes can be co-located on a single server that is responsible for handling the metadata and coordination needs of your cluster.
-The equivalent of an AWS [m3.xlarge](https://aws.amazon.com/ec2/instance-types/#M3) is sufficient for most clusters. This
-hardware offers:
+If you do not have an existing Druid cluster and wish to start running Druid in a clustered deployment, this guide provides an example clustered deployment with pre-made configurations.
 
-- 4 vCPUs
-- 15 GB RAM
-- 80 GB SSD storage
+#### Master Server
 
-### Data Server
+The Coordinator and Overlord processes are responsible for handling the metadata and coordination needs of your cluster. They can be colocated on the same server.
 
-Historicals and MiddleManagers can be colocated on a single server to handle the actual data in your cluster. These servers benefit greatly from CPU, RAM,
-and SSDs. The equivalent of an AWS [r3.2xlarge](https://aws.amazon.com/ec2/instance-types/#r3) is a
-good starting point. This hardware offers:
+In this example, we will be deploying the equivalent of one AWS [m5.2xlarge](https://aws.amazon.com/ec2/instance-types/m5/) instance.
 
+This hardware offers:
 - 8 vCPUs
-- 61 GB RAM
-- 160 GB SSD storage
+- 31 GB RAM
 
-### Query Server
+Example Master server configurations that have been sized for this hardware can be found under `conf/druid/cluster/master`.
+
+#### Data Server
+
+Historicals and MiddleManagers can be colocated on the same server to handle the actual data in your cluster. These servers benefit greatly from CPU, RAM,
+and SSDs. 
+
+In this example, we will be deploying the equivalent of two AWS [i3.4xlarge](https://aws.amazon.com/ec2/instance-types/i3/) instances. 
+
+This hardware offers:
+
+- 16 vCPUs
+- 122 GB RAM
+- 2 * 1.9TB SSD storage
+
+Example Data server configurations that have been sized for this hardware can be found under `conf/druid/cluster/data`.
+
+#### Query Server
 
 Druid Brokers accept queries and farm them out to the rest of the cluster. They also optionally maintain an
-in-memory query cache. These servers benefit greatly from CPU and RAM, and can also be deployed on
-the equivalent of an AWS [r3.2xlarge](https://aws.amazon.com/ec2/instance-types/#r3). This hardware
-offers:
+in-memory query cache. These servers benefit greatly from CPU and RAM.
+ 
+In this example, we will be deploying the equivalent of one AWS [m5.2xlarge](https://aws.amazon.com/ec2/instance-types/m5/) instance. 
 
+This hardware offers:
 - 8 vCPUs
-- 61 GB RAM
-- 160 GB SSD storage
+- 31 GB RAM
 
 You can consider co-locating any open source UIs or query libraries on the same server that the Broker is running on.
 
-Very large clusters should consider selecting larger servers.
+Example Query server configurations that have been sized for this hardware can be found under `conf/druid/cluster/query`.
+
+#### Other Hardware Sizes
+
+The example cluster above is just one of many possible ways to size a Druid cluster.
+
+You can choose smaller/larger hardware or fewer/more servers for your specific needs and constraints.
+
+If your use case has complex scaling requirements, you can also choose not to colocate Druid processes (e.g., standalone Historical servers).
+
+The information in the [basic cluster tuning guide](../operations/basic-cluster-tuning.html) can help with your decision-making process and with sizing your configurations.
+
+### Migrating from a Single-Server Deployment
+
+If you have an existing single-server deployment, such as the ones from the [single-server deployment examples](../operations/single-server.html), and you wish to migrate to a clustered deployment of similar scale, the following section contains guidelines for choosing equivalent hardware using the Master/Data/Query server organization.
+
+#### Master Server
+
+The main considerations for the Master server are available CPUs and RAM for the Coordinator and Overlord heaps.
+
+Sum up the allocated heap sizes for your Coordinator and Overlord from the single-server deployment, and choose Master server hardware with enough RAM for the combined heaps, with some extra RAM for other processes on the machine.
+
+For CPU cores, you can choose hardware with approximately 1/4th of the cores of the single-server deployment.
+
+#### Data Server
+
+When choosing Data server hardware for the cluster, the main considerations are available CPUs and RAM, and using SSD storage if feasible.
+
+In a clustered deployment, having multiple Data servers is a good idea for fault-tolerance purposes.
+
+When choosing the Data server hardware, you can choose a split factor `N`, divide the original CPU/RAM of the single-server deployment by `N`, and deploy `N` Data servers of reduced size in the new cluster.
+
+Instructions for adjusting the Historical/MiddleManager configs for the split are described in a later section in this guide.
+
+#### Query Server 
+
+The main considerations for the Query server are available CPUs and RAM for the Broker heap + direct memory, and for the Router heap.
+
+Sum up the allocated memory sizes for your Broker and Router from the single-server deployment, and choose Query server hardware with enough RAM to cover the Broker/Router, with some extra RAM for other processes on the machine.
+
+For CPU cores, you can choose hardware with approximately 1/4th of the cores of the single-server deployment.
+
+The [basic cluster tuning guide](../operations/basic-cluster-tuning.html) has information on how to calculate Broker/Router memory usage.
 
 ## Select OS
 
@@ -103,25 +156,53 @@ In the package, you should find:
 
 * `DISCLAIMER`, `LICENSE`, and `NOTICE` files
 * `bin/*` - scripts related to the [single-machine quickstart](index.html)
-* `conf/*` - template configurations for a clustered setup
+* `conf/druid/cluster/*` - template configurations for a clustered setup
 * `extensions/*` - core Druid extensions
 * `hadoop-dependencies/*` - Druid Hadoop dependencies
 * `lib/*` - libraries and dependencies for core Druid
 * `quickstart/*` - files related to the [single-machine quickstart](index.html)
 
-We'll be editing the files in `conf/` in order to get things running.
+We'll be editing the files in `conf/druid/cluster/` in order to get things running.
+
+### Migrating from Single-Server Deployments
+
+In the following sections we will be editing the configs under `conf/druid/cluster`.
+
+If you have an existing single-server deployment, please copy your existing configs to `conf/druid/cluster` to preserve any config changes you have made.
+
+## Configure metadata storage and deep storage
+
+### Migrating from Single-Server Deployments
+
+If you have an existing single-server deployment and you wish to preserve your data across the migration, please follow the instructions at [metadata migration](../operations/metadata-migration.html) and [deep storage migration](../operations/deep-storage-migration.html) before updating your metadata/deep storage configs.
 
-## Configure deep storage
+These guides are targeted at single-server deployments that use the Derby metadata store and local deep storage. If you are already using a non-Derby metadata store in your single-server cluster, you can reuse the existing metadata store for the new cluster.
+
+These guides also provide information on migrating segments from local deep storage. A clustered deployment requires distributed deep storage like S3 or HDFS. If your single-server deployment was already using distributed deep storage, you can reuse the existing deep storage for the new cluster.
+
+### Metadata Storage
+
+In `conf/druid/cluster/_common/common.runtime.properties`, replace
+"metadata.storage.*" with the address of the machine that you will use as your metadata store:
+
+- `druid.metadata.storage.connector.connectURI`
+- `druid.metadata.storage.connector.host`
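+
+For example, with a hypothetical MySQL metadata store on a host named `metadata.example.com` (the hostname is a placeholder):
+
+```
+druid.metadata.storage.type=mysql
+druid.metadata.storage.connector.connectURI=jdbc:mysql://metadata.example.com:3306/druid
+druid.metadata.storage.connector.host=metadata.example.com
+druid.metadata.storage.connector.port=3306
+```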
+
+In a production deployment, we recommend running a dedicated metadata store such as MySQL or PostgreSQL with replication, deployed separately from the Druid servers.
+
+The [MySQL extension](../development/extensions-core/mysql.html) and [PostgreSQL extension](../development/extensions-core/postgresql.html) docs have instructions for extension configuration and initial database setup.
+
+### Deep Storage
 
 Druid relies on a distributed filesystem or large object (blob) store for data storage. The most
 commonly used deep storage implementations are S3 (popular for those on AWS) and HDFS (popular if
 you already have a Hadoop deployment).
 
-### S3
+#### S3
 
-In `conf/druid/_common/common.runtime.properties`,
+In `conf/druid/cluster/_common/common.runtime.properties`,
 
-- Set `druid.extensions.loadList=["druid-s3-extensions"]`.
+- Add "druid-s3-extensions" to `druid.extensions.loadList`.
 
 - Comment out the configurations for local storage under "Deep Storage" and "Indexing service logs".
 
@@ -150,11 +231,13 @@ druid.indexer.logs.s3Bucket=your-bucket
 druid.indexer.logs.s3Prefix=druid/indexing-logs
 ```
 
-### HDFS
+Please see the [S3 extension](../development/extensions-core/s3.html) documentation for more info.
+
+#### HDFS
 
-In `conf/druid/_common/common.runtime.properties`,
+In `conf/druid/cluster/_common/common.runtime.properties`,
 
-- Set `druid.extensions.loadList=["druid-hdfs-storage"]`.
+- Add "druid-hdfs-storage" to `druid.extensions.loadList`.
 
 - Comment out the configurations for local storage under "Deep Storage" and "Indexing service logs".
 
@@ -183,7 +266,9 @@ Also,
 
 - Place your Hadoop configuration XMLs (core-site.xml, hdfs-site.xml, yarn-site.xml,
 mapred-site.xml) on the classpath of your Druid processes. You can do this by copying them into
-`conf/druid/_common/`.
+`conf/druid/cluster/_common/`.
+
+Please see the [HDFS extension](../development/extensions-core/hdfs.html) documentation for more info.
 
 ## Configure Tranquility Server (optional)
 
@@ -191,24 +276,18 @@ Data streams can be sent to Druid through a simple HTTP API powered by Tranquili
 Server. If you will be using this functionality, then at this point you should [configure
 Tranquility Server](../ingestion/stream-ingestion.html#server).
 
-## Configure Tranquility Kafka (optional)
-
-Druid can consuming streams from Kafka through Tranquility Kafka. If you will be
-using this functionality, then at this point you should
-[configure Tranquility Kafka](../ingestion/stream-ingestion.html#kafka).
-
 ## Configure for connecting to Hadoop (optional)
 
 If you will be loading data from a Hadoop cluster, then at this point you should configure Druid to be aware
 of your cluster:
 
-- Update `druid.indexer.task.hadoopWorkingPath` in `conf/druid/middleManager/runtime.properties` to
+- Update `druid.indexer.task.hadoopWorkingPath` in `conf/druid/cluster/middleManager/runtime.properties` to
 a path on HDFS that you'd like to use for temporary files required during the indexing process.
 `druid.indexer.task.hadoopWorkingPath=/tmp/druid-indexing` is a common choice.
 
 - Place your Hadoop configuration XMLs (core-site.xml, hdfs-site.xml, yarn-site.xml,
 mapred-site.xml) on the classpath of your Druid processes. You can do this by copying them into
-`conf/druid/_common/core-site.xml`, `conf/druid/_common/hdfs-site.xml`, and so on.
+`conf/druid/cluster/_common/core-site.xml`, `conf/druid/cluster/_common/hdfs-site.xml`, and so on.
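+
+For example, assuming your Hadoop configuration files live in the hypothetical location `/etc/hadoop/conf`:
+
+```bash
+cp /etc/hadoop/conf/{core-site,hdfs-site,yarn-site,mapred-site}.xml conf/druid/cluster/_common/
+```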
 
 Note that you don't need to use HDFS deep storage in order to load data from Hadoop. For example, if
 your cluster is running on Amazon Web Services, we recommend using S3 for deep storage even if you
@@ -216,86 +295,92 @@ are loading data using Hadoop or Elastic MapReduce.
 
 For more info, please see [batch ingestion](../ingestion/batch-ingestion.html).
 
-## Configure addresses for Druid coordination
+## Configure Zookeeper connection
 
-In this simple cluster, you will deploy a single Master server containing the following:
-- A single Druid Coordinator process
-- A single Druid Overlord process
-- A single ZooKeeper istance
-- An embedded Derby metadata store
+In a production cluster, we recommend a dedicated ZooKeeper quorum, deployed separately from the Druid servers.
 
-The processes on the cluster need to be configured with the addresses of this ZK instance and the metadata store.
+In `conf/druid/cluster/_common/common.runtime.properties`, set
+`druid.zk.service.host` to a [connection string](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html)
+containing a comma-separated list of host:port pairs, each corresponding to a ZooKeeper server in your ZK quorum.
+(e.g. "127.0.0.1:4545" or "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002")
 
-In `conf/druid/_common/common.runtime.properties`, replace
-"zk.service.host" with [connection string](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html)
-containing a comma separated list of host:port pairs, each corresponding to a ZooKeeper server
-(e.g. "127.0.0.1:4545" or "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"):
+You can also choose to run ZK on the Master servers instead of having a dedicated ZK cluster. If doing so, we recommend deploying 3 Master servers so that you have a ZK quorum.
 
-- `druid.zk.service.host`
+## Configuration Tuning
 
-In `conf/druid/_common/common.runtime.properties`, replace
-"metadata.storage.*" with the address of the machine that you will use as your metadata store:
+### Migrating from a Single-Server Deployment
 
-- `druid.metadata.storage.connector.connectURI`
-- `druid.metadata.storage.connector.host`
+#### Master
 
-<div class="note caution">
-In production, we recommend running 2 Master servers, each running a Druid Coordinator process
-and a Druid Overlord process. We also recommend running a ZooKeeper cluster on its own dedicated hardware,
-as well as replicated <a href = "../dependencies/metadata-storage.html">metadata storage</a>
-such as MySQL or PostgreSQL, on its own dedicated hardware.
-</div>
+If you are using an example configuration from the [single-server deployment examples](../operations/single-server.html), note that these configurations combine the Coordinator and Overlord into a single process.
 
-## Tune processes on the Data Server
+The example configs under `conf/druid/cluster/master/coordinator-overlord` also combine the Coordinator and Overlord processes.
 
-Druid Historicals and MiddleManagers can be co-located on the same hardware. Both Druid processes benefit greatly from
-being tuned to the hardware they run on. If you are running Tranquility Server or Kafka, you can also colocate Tranquility with these two Druid processes.
-If you are using [r3.2xlarge](https://aws.amazon.com/ec2/instance-types/#r3)
-EC2 instances, or similar hardware, the configuration in the distribution is a
-reasonable starting point.
+You can copy your existing `coordinator-overlord` configs from the single-server deployment to `conf/druid/cluster/master/coordinator-overlord`.
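+
+For reference, the combined process is enabled by these properties in the example `conf/druid/cluster/master/coordinator-overlord/runtime.properties`:
+
+```
+druid.coordinator.asOverlord.enabled=true
+druid.coordinator.asOverlord.overlordService=druid/overlord
+```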
 
-If you are using different hardware, we recommend adjusting configurations for your specific
-hardware. The most commonly adjusted configurations are:
+#### Data
 
-- `-Xmx` and `-Xms`
-- `druid.server.http.numThreads`
-- `druid.processing.buffer.sizeBytes`
-- `druid.processing.numThreads`
-- `druid.query.groupBy.maxIntermediateRows`
-- `druid.query.groupBy.maxResults`
-- `druid.server.maxSize` and `druid.segmentCache.locations` on Historical processes
-- `druid.worker.capacity` on MiddleManagers
+Suppose we are migrating from a single-server deployment that had 32 CPUs and 256GB RAM. In the old deployment, the following configurations were applied for Historicals and MiddleManagers:
 
-<div class="note info">
-Keep -XX:MaxDirectMemory >= numThreads*sizeBytes, otherwise Druid will fail to start up..
-</div>
+Historical (Single-server)
+```
+druid.processing.buffer.sizeBytes=500000000
+druid.processing.numMergeBuffers=8
+druid.processing.numThreads=31
+```
 
-Please see the Druid [configuration documentation](../configuration/index.html) for a full description of all
-possible configuration options.
+MiddleManager (Single-server)
+```
+druid.worker.capacity=8
+druid.indexer.fork.property.druid.processing.numMergeBuffers=2
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.numThreads=1
+```
 
-## Tune Druid Brokers on the Query Server
+In the clustered deployment, we can choose a split factor (2 in this example) and deploy 2 Data servers with 16 CPUs and 128GB RAM each. The areas to scale are the following:
 
-Druid Brokers also benefit greatly from being tuned to the hardware they
-run on. If you are using [r3.2xlarge](https://aws.amazon.com/ec2/instance-types/#r3) EC2 instances,
-or similar hardware, the configuration in the distribution is a reasonable starting point.
+Historical:
+- `druid.processing.numThreads`: Set to `(num_cores - 1)` based on the new hardware
+- `druid.processing.numMergeBuffers`: Divide the old value from the single-server deployment by the split factor
+- `druid.processing.buffer.sizeBytes`: Keep this unchanged
 
-If you are using different hardware, we recommend adjusting configurations for your specific
-hardware. The most commonly adjusted configurations are:
+MiddleManager:
+- `druid.worker.capacity`: Divide the old value from the single-server deployment by the split factor
+- `druid.indexer.fork.property.druid.processing.numMergeBuffers`: Keep this unchanged
+- `druid.indexer.fork.property.druid.processing.buffer.sizeBytes`: Keep this unchanged
+- `druid.indexer.fork.property.druid.processing.numThreads`: Keep this unchanged
 
-- `-Xmx` and `-Xms`
-- `druid.server.http.numThreads`
-- `druid.cache.sizeInBytes`
-- `druid.processing.buffer.sizeBytes`
-- `druid.processing.numThreads`
-- `druid.query.groupBy.maxIntermediateRows`
-- `druid.query.groupBy.maxResults`
+The resulting configs after the split:
 
-<div class="note caution">
-Keep -XX:MaxDirectMemory >= numThreads*sizeBytes, otherwise Druid will fail to start up.
-</div>
+New Historical (on 2 Data servers)
+```
+druid.processing.buffer.sizeBytes=500000000
+druid.processing.numMergeBuffers=4
+druid.processing.numThreads=15
+```
+
+New MiddleManager (on 2 Data servers)
+```
+druid.worker.capacity=4
+druid.indexer.fork.property.druid.processing.numMergeBuffers=2
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.numThreads=1
+```
+
+#### Query
+
+You can copy your existing Broker and Router configs to the directories under `conf/druid/cluster/query`; no modifications are needed as long as the new hardware is sized accordingly.
+
+### Fresh deployment
+
+If you are using the example cluster described above:
+- 1 Master server (m5.2xlarge)
+- 2 Data servers (i3.4xlarge)
+- 1 Query server (m5.2xlarge)
+
+The configurations under `conf/druid/cluster` have already been sized for this hardware, and you do not need to make further modifications for general use cases.
 
-Please see the Druid [configuration documentation](../configuration/index.html) for a full description of all
-possible configuration options.
+If you have chosen different hardware, the [basic cluster tuning guide](../operations/basic-cluster-tuning.html) can help you size your configurations.
 
 ## Open ports (if using a firewall)
 
@@ -318,7 +403,6 @@ inbound connections on the following:
 
 ### Other
 - 8200 (Tranquility Server, if used)
-- 8084 (Standalone Realtime, if used, deprecated)
 
 <div class="note caution">
 In production, we recommend deploying ZooKeeper and your metadata store on their own dedicated hardware,
@@ -327,80 +411,88 @@ rather than on the Master server.
 
 ## Start Master Server
 
-Copy the Druid distribution and your edited configurations to your Master server. 
+Copy the Druid distribution and your edited configurations to your Master server.
 
 If you have been editing the configurations on your local machine, you can use *rsync* to copy them:
 
 ```bash
-rsync -az apache-druid-#{DRUIDVERSION}/ COORDINATION_SERVER:apache-druid-#{DRUIDVERSION}/
+rsync -az apache-druid-#{DRUIDVERSION}/ MASTER_SERVER:apache-druid-#{DRUIDVERSION}/
 ```
 
-Log on to your coordination server and install Zookeeper:
+### No ZooKeeper on Master
+
+From the distribution root, run the following command to start the Master server:
+
+```
+bin/start-cluster-master-no-zk-server
+```
+
+### With ZooKeeper on Master
+
+If you plan to run ZK on Master servers, first update `conf/zoo.cfg` to reflect how you plan to run ZK. Then log on to your Master servers and install ZooKeeper:
 
 ```bash
 curl http://www.gtlib.gatech.edu/pub/apache/zookeeper/zookeeper-3.4.11/zookeeper-3.4.11.tar.gz -o zookeeper-3.4.11.tar.gz
 tar -xzf zookeeper-3.4.11.tar.gz
-cd zookeeper-3.4.11
-cp conf/zoo_sample.cfg conf/zoo.cfg
-./bin/zkServer.sh start
+mv zookeeper-3.4.11 zk
 ```
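+
+As a sketch, a `conf/zoo.cfg` for a hypothetical 3-server quorum might look like the following (hostnames and paths are illustrative; each server also needs a `myid` file under `dataDir` identifying its server number):
+
+```
+tickTime=2000
+dataDir=var/zk
+clientPort=2181
+initLimit=10
+syncLimit=5
+server.1=master1.example.com:2888:3888
+server.2=master2.example.com:2888:3888
+server.3=master3.example.com:2888:3888
+```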
 
-<div class="note caution">
-In production, we also recommend running a ZooKeeper cluster on its own dedicated hardware.
-</div>
+If you are running ZK on the Master server, you can start the Master server processes together with ZK using:
 
-On your coordination server, *cd* into the distribution and start up the coordination services (you should do this in different windows or pipe the log to a file):
-
-```bash
-java `cat conf/druid/coordinator/jvm.config | xargs` -cp conf/druid/_common:conf/druid/coordinator:lib/* org.apache.druid.cli.Main server coordinator
-java `cat conf/druid/overlord/jvm.config | xargs` -cp conf/druid/_common:conf/druid/overlord:lib/* org.apache.druid.cli.Main server overlord
+```
+bin/start-cluster-master-with-zk-server
 ```
 
-You should see a log message printed out for each service that starts up. You can view detailed logs
-for any service by looking in the `var/log/druid` directory using another terminal.
+<div class="note caution">
+In production, we also recommend running a ZooKeeper cluster on its own dedicated hardware.
+</div>
 
 ## Start Data Server
 
-Copy the Druid distribution and your edited configurations to your Data servers set aside for the Druid Historicals and MiddleManagers.
+Copy the Druid distribution and your edited configurations to your Data servers.
 
-On each one, *cd* into the distribution and run this command to start the Data server processes:
+From the distribution root, run the following command to start the Data server:
 
-```bash
-java `cat conf/druid/historical/jvm.config | xargs` -cp conf/druid/_common:conf/druid/historical:lib/* org.apache.druid.cli.Main server historical
-java `cat conf/druid/middleManager/jvm.config | xargs` -cp conf/druid/_common:conf/druid/middleManager:lib/* org.apache.druid.cli.Main server middleManager
+```
+bin/start-cluster-data-server
 ```
 
-You can add more Data servers with Druid Historicals and MiddleManagers as needed.
+You can add more Data servers as needed.
 
 <div class="note info">
 For clusters with complex resource allocation needs, you can break apart Historicals and MiddleManagers and scale the components individually.
-This also allows you take advantage of Druid's built-in MiddleManager
-autoscaling facility.
+This also allows you to take advantage of Druid's built-in MiddleManager autoscaling facility.
 </div>
 
-If you are doing push-based stream ingestion with Kafka or over HTTP, you can also start Tranquility Server on the same
-hardware that holds MiddleManagers and Historicals. For large scale production, MiddleManagers and Tranquility Server
-can still be co-located. If you are running Tranquility (not server) with a stream processor, you can co-locate
-Tranquility with the stream processor and not require Tranquility Server.
+### Tranquility
+
+If you are doing push-based stream ingestion with Kafka or over HTTP, you can also start Tranquility Server on the Data server. 
+
+For large-scale production, Data server processes and the Tranquility Server can still be co-located.
+
+If you are running Tranquility (not server) with a stream processor, you can co-locate Tranquility with the stream processor and not require Tranquility Server.
+
+First install Tranquility:
 
 ```bash
-curl -O http://static.druid.io/tranquility/releases/tranquility-distribution-0.8.0.tgz
-tar -xzf tranquility-distribution-0.8.0.tgz
-cd tranquility-distribution-0.8.0
-bin/tranquility <server or kafka> -configFile <path_to_druid_distro>/conf/tranquility/<server or kafka>.json
+curl http://static.druid.io/tranquility/releases/tranquility-distribution-0.8.3.tgz -o tranquility-distribution-0.8.3.tgz
+tar -xzf tranquility-distribution-0.8.3.tgz
+mv tranquility-distribution-0.8.3 tranquility
 ```
 
+Afterwards, in `conf/supervise/cluster/data.conf`, uncomment the `tranquility-server` line and restart the Data server processes.
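+
+After uncommenting, the line in `conf/supervise/cluster/data.conf` should read:
+
+```
+!p95 tranquility-server tranquility/bin/tranquility server -configFile conf/tranquility/server.json -Ddruid.extensions.loadList=[]
+```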
+
 ## Start Query Server
 
-Copy the Druid distribution and your edited configurations to your Query servers set aside for the Druid Brokers.
+Copy the Druid distribution and your edited configurations to your Query servers.
 
-On each Query server, *cd* into the distribution and run this command to start the Broker process (you may want to pipe the output to a log file):
+From the distribution root, run the following command to start the Query server:
 
-```bash
-java `cat conf/druid/broker/jvm.config | xargs` -cp conf/druid/_common:conf/druid/broker:lib/* org.apache.druid.cli.Main server broker
+```
+bin/start-cluster-query-server
 ```
 
-You can add more Query servers as needed based on query load.
+You can add more Query servers as needed based on query load. If you increase the number of Query servers, be sure to adjust the connection pools on your Historicals and Tasks as described in the [basic cluster tuning guide](../operations/basic-cluster-tuning.html).
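+
+As a rough sketch of that adjustment: the tuning guide suggests keeping `druid.server.http.numThreads` on each Historical somewhat higher than the sum of `druid.broker.http.numConnections` across all Brokers (a similar consideration applies to Tasks). With two Brokers at the example setting of 50 connections each, you might set, on each Historical:
+
+```
+druid.server.http.numThreads=110
+```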
 
 ## Loading data
 
diff --git a/docs/content/tutorials/index.md b/docs/content/tutorials/index.md
index afd5817..f6302dd 100644
--- a/docs/content/tutorials/index.md
+++ b/docs/content/tutorials/index.md
@@ -1,6 +1,6 @@
 ---
 layout: doc_page
-title: "Apache Druid (incubating) Quickstart"
+title: "Apache Druid (incubating) Single-Server Quickstart"
 ---
 
 <!--
@@ -22,7 +22,7 @@ title: "Apache Druid (incubating) Quickstart"
   ~ under the License.
   -->
 
-# Apache Druid (incubating) Quickstart
+# Apache Druid (incubating) Single-Server Quickstart
 
 In this quickstart, we will download Druid and set it up on a single machine. The cluster will be ready to load data
 after completing this initial setup.
@@ -63,7 +63,7 @@ In the package, you should find:
 
 * `DISCLAIMER`, `LICENSE`, and `NOTICE` files
 * `bin/*` - scripts useful for this quickstart
-* `conf/*` - template configurations for a clustered setup
+* `conf/*` - example configurations for single-server and clustered setup
 * `extensions/*` - core Druid extensions
 * `hadoop-dependencies/*` - Druid Hadoop dependencies
 * `lib/*` - libraries and dependencies for core Druid
diff --git a/docs/content/tutorials/tutorial-batch-hadoop.md b/docs/content/tutorials/tutorial-batch-hadoop.md
index 59f2dff..26b507e 100644
--- a/docs/content/tutorials/tutorial-batch-hadoop.md
+++ b/docs/content/tutorials/tutorial-batch-hadoop.md
@@ -148,13 +148,13 @@ cp /usr/local/hadoop/etc/hadoop/*.xml /shared/hadoop_xml
 From the host machine, run the following, where {PATH_TO_DRUID} is replaced by the path to the Druid package.
 
 ```bash
-mkdir -p {PATH_TO_DRUID}/quickstart/tutorial/conf/druid/_common/hadoop-xml
-cp /tmp/shared/hadoop_xml/*.xml {PATH_TO_DRUID}/quickstart/tutorial/conf/druid/_common/hadoop-xml/
+mkdir -p {PATH_TO_DRUID}/conf/druid/single-server/micro-quickstart/_common/hadoop-xml
+cp /tmp/shared/hadoop_xml/*.xml {PATH_TO_DRUID}/conf/druid/single-server/micro-quickstart/_common/hadoop-xml/
 ```
 
 ### Update Druid segment and log storage
 
-In your favorite text editor, open `quickstart/tutorial/conf/druid/_common/common.runtime.properties`, and make the following edits:
+In your favorite text editor, open `conf/druid/single-server/micro-quickstart/_common/common.runtime.properties`, and make the following edits:
 
 #### Disable local deep storage and enable HDFS deep storage
 
@@ -206,7 +206,7 @@ a task that loads the `wikiticker-2015-09-12-sampled.json.gz` file included in t
 Let's submit the `wikipedia-index-hadoop-.json` task:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/wikipedia-index-hadoop.json 
+bin/post-index-task --file quickstart/tutorial/wikipedia-index-hadoop.json --url http://localhost:8081
 ```
 
 ## Querying your data
@@ -219,7 +219,7 @@ This tutorial is only meant to be used together with the [query tutorial](../tut
 
 If you wish to go through any of the other tutorials, you will need to:
 * Shut down the cluster and reset the cluster state by removing the contents of the `var` directory under the druid package.
-* Revert the deep storage and task storage config back to local types in `quickstart/tutorial/conf/druid/_common/common.runtime.properties`
+* Revert the deep storage and task storage config back to local types in `conf/druid/single-server/micro-quickstart/_common/common.runtime.properties`
 * Restart the cluster
 
 This is necessary because the other ingestion tutorials will write to the same "wikipedia" datasource, and later tutorials expect the cluster to use local deep storage.
diff --git a/docs/content/tutorials/tutorial-batch.md b/docs/content/tutorials/tutorial-batch.md
index 9fd5892..84a7d27 100644
--- a/docs/content/tutorials/tutorial-batch.md
+++ b/docs/content/tutorials/tutorial-batch.md
@@ -121,7 +121,7 @@ This script will POST an ingestion task to the Druid Overlord and poll Druid unt
 Run the following command from Druid package root:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/wikipedia-index.json 
+bin/post-index-task --file quickstart/tutorial/wikipedia-index.json --url http://localhost:8081
 ```
 
 You should see output like the following:
@@ -129,8 +129,8 @@ You should see output like the following:
 ```bash
 Beginning indexing data for wikipedia
 Task started: index_wikipedia_2018-07-27T06:37:44.323Z
-Task log:     http://localhost:8090/druid/indexer/v1/task/index_wikipedia_2018-07-27T06:37:44.323Z/log
-Task status:  http://localhost:8090/druid/indexer/v1/task/index_wikipedia_2018-07-27T06:37:44.323Z/status
+Task log:     http://localhost:8081/druid/indexer/v1/task/index_wikipedia_2018-07-27T06:37:44.323Z/log
+Task status:  http://localhost:8081/druid/indexer/v1/task/index_wikipedia_2018-07-27T06:37:44.323Z/status
 Task index_wikipedia_2018-07-27T06:37:44.323Z still running...
 Task index_wikipedia_2018-07-27T06:37:44.323Z still running...
 Task finished with status: SUCCESS
@@ -153,7 +153,7 @@ Let's briefly discuss how we would've submitted the ingestion task without using
 To submit the task, POST it to Druid in a new terminal window from the apache-druid-#{DRUIDVERSION} directory:
 
 ```bash
-curl -X 'POST' -H 'Content-Type:application/json' -d @quickstart/tutorial/wikipedia-index.json http://localhost:8090/druid/indexer/v1/task
+curl -X 'POST' -H 'Content-Type:application/json' -d @quickstart/tutorial/wikipedia-index.json http://localhost:8081/druid/indexer/v1/task
 ```
 
 Which will print the ID of the task if the submission was successful:
diff --git a/docs/content/tutorials/tutorial-compaction.md b/docs/content/tutorials/tutorial-compaction.md
index 97cd8b1..0051796 100644
--- a/docs/content/tutorials/tutorial-compaction.md
+++ b/docs/content/tutorials/tutorial-compaction.md
@@ -41,7 +41,7 @@ For this tutorial, we'll be using the Wikipedia edits sample data, with an inges
 The ingestion spec can be found at `quickstart/tutorial/compaction-init-index.json`. Let's submit that spec, which will create a datasource called `compaction-tutorial`:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/compaction-init-index.json 
+bin/post-index-task --file quickstart/tutorial/compaction-init-index.json --url http://localhost:8081
 ```
 
 <div class="note caution">
@@ -99,7 +99,7 @@ In this tutorial example, only one compacted segment will be created per hour, a
 Let's submit this task now:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/compaction-keep-granularity.json
+bin/post-index-task --file quickstart/tutorial/compaction-keep-granularity.json --url http://localhost:8081
 ```
 
 After the task finishes, refresh the [segments view](http://localhost:8888/unified-console.html#segments).
@@ -158,7 +158,7 @@ Note that `segmentGranularity` is set to `DAY` in this compaction task spec.
 Let's submit this task now:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/compaction-day-granularity.json
+bin/post-index-task --file quickstart/tutorial/compaction-day-granularity.json --url http://localhost:8081
 ```
 
 It will take a bit of time before the Coordinator marks the old input segments as unused, so you may see an intermediate state with 25 total segments. Eventually, there will only be one DAY granularity segment:
diff --git a/docs/content/tutorials/tutorial-delete-data.md b/docs/content/tutorials/tutorial-delete-data.md
index a4b1f7e..46fbbdc 100644
--- a/docs/content/tutorials/tutorial-delete-data.md
+++ b/docs/content/tutorials/tutorial-delete-data.md
@@ -36,7 +36,7 @@ In this tutorial, we will use the Wikipedia edits data, with an indexing spec th
 Let's load this initial data:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/deletion-index.json 
+bin/post-index-task --file quickstart/tutorial/deletion-index.json --url http://localhost:8081
 ```
 
 When the load finishes, open [http://localhost:8888/unified-console.html#datasources](http://localhost:8888/unified-console.html#datasources) in a browser.
diff --git a/docs/content/tutorials/tutorial-ingestion-spec.md b/docs/content/tutorials/tutorial-ingestion-spec.md
index 29b0ea9..5f05d18 100644
--- a/docs/content/tutorials/tutorial-ingestion-spec.md
+++ b/docs/content/tutorials/tutorial-ingestion-spec.md
@@ -634,7 +634,7 @@ We've finished defining the ingestion spec, it should now look like the followin
 From the apache-druid-#{DRUIDVERSION} package root, run the following command:
 
 ```bash
-bin/post-index-task --file quickstart/ingestion-tutorial-index.json 
+bin/post-index-task --file quickstart/ingestion-tutorial-index.json --url http://localhost:8081
 ```
 
 After the script completes, we will query the data.
diff --git a/docs/content/tutorials/tutorial-retention.md b/docs/content/tutorials/tutorial-retention.md
index dafca32..6f5c91c 100644
--- a/docs/content/tutorials/tutorial-retention.md
+++ b/docs/content/tutorials/tutorial-retention.md
@@ -38,7 +38,7 @@ For this tutorial, we'll be using the Wikipedia edits sample data, with an inges
 The ingestion spec can be found at `quickstart/tutorial/retention-index.json`. Let's submit that spec, which will create a datasource called `retention-tutorial`:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/retention-index.json 
+bin/post-index-task --file quickstart/tutorial/retention-index.json --url http://localhost:8081
 ```
 
 After the ingestion completes, go to [http://localhost:8888/unified-console.html#datasources](http://localhost:8888/unified-console.html#datasources) in a browser to access the Druid Console's datasource view.
diff --git a/docs/content/tutorials/tutorial-rollup.md b/docs/content/tutorials/tutorial-rollup.md
index 483a463..e4ca658 100644
--- a/docs/content/tutorials/tutorial-rollup.md
+++ b/docs/content/tutorials/tutorial-rollup.md
@@ -117,7 +117,7 @@ We will see how these definitions are used after we load this data.
 From the apache-druid-#{DRUIDVERSION} package root, run the following command:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/rollup-index.json 
+bin/post-index-task --file quickstart/tutorial/rollup-index.json --url http://localhost:8081
 ```
 
 After the script completes, we will query the data.
diff --git a/docs/content/tutorials/tutorial-tranquility.md b/docs/content/tutorials/tutorial-tranquility.md
index 10376cd..670a91e 100644
--- a/docs/content/tutorials/tutorial-tranquility.md
+++ b/docs/content/tutorials/tutorial-tranquility.md
@@ -48,13 +48,13 @@ The startup scripts for the tutorial will expect the contents of the Tranquility
 
 ## Enable Tranquility Server
 
-- In your `quickstart/tutorial/conf/tutorial-cluster.conf`, uncomment the `tranquility-server` line.
-- Stop your *bin/supervise* command (CTRL-C) and then restart it by again running `bin/supervise -c quickstart/tutorial/conf/tutorial-cluster.conf`.
+- In your `conf/supervise/single-server/micro-quickstart.conf`, uncomment the `tranquility-server` line.
+- Stop your *bin/supervise* command (CTRL-C) and then restart it by again running `bin/supervise -c conf/supervise/single-server/micro-quickstart.conf`.
 
 As part of the output of *supervise* you should see something like:
 
 ```bash
-Running command[tranquility-server], logging to[/stage/apache-druid-#{DRUIDVERSION}/var/sv/tranquility-server.log]: tranquility/bin/tranquility server -configFile quickstart/tutorial/conf/tranquility/server.json -Ddruid.extensions.loadList=[]
+Running command[tranquility-server], logging to[/stage/apache-druid-#{DRUIDVERSION}/var/sv/tranquility-server.log]: tranquility/bin/tranquility server -configFile conf/tranquility/server.json -Ddruid.extensions.loadList=[]
 ```
 
 You can check the log file in `var/sv/tranquility-server.log` to confirm that the server is starting up properly.
@@ -96,7 +96,7 @@ Please follow the [query tutorial](../tutorials/tutorial-query.html) to run some
 
 If you wish to go through any of the other ingestion tutorials, you will need to shut down the cluster and reset the cluster state by removing the contents of the `var` directory under the druid package, as the other tutorials will write to the same "wikipedia" datasource.
 
-When cleaning up after running this Tranquility tutorial, it is also necessary to recomment the `tranquility-server` line in `quickstart/tutorial/conf/tutorial-cluster.conf` before restarting the cluster.
+When cleaning up after running this Tranquility tutorial, it is also necessary to recomment the `tranquility-server` line in `conf/supervise/single-server/micro-quickstart.conf` before restarting the cluster.
 
 
 ## Further reading
diff --git a/docs/content/tutorials/tutorial-transform-spec.md b/docs/content/tutorials/tutorial-transform-spec.md
index 083268d..b30eebb 100644
--- a/docs/content/tutorials/tutorial-transform-spec.md
+++ b/docs/content/tutorials/tutorial-transform-spec.md
@@ -135,7 +135,7 @@ This filter selects the first 3 rows, and it will exclude the final "lion" row i
 Let's submit this task now, which has been included at `quickstart/tutorial/transform-index.json`:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/transform-index.json
+bin/post-index-task --file quickstart/tutorial/transform-index.json --url http://localhost:8081
 ```
 
 ## Query the transformed data
diff --git a/docs/content/tutorials/tutorial-update-data.md b/docs/content/tutorials/tutorial-update-data.md
index d55ce97..ce0abfc 100644
--- a/docs/content/tutorials/tutorial-update-data.md
+++ b/docs/content/tutorials/tutorial-update-data.md
@@ -44,7 +44,7 @@ The spec we'll use for this tutorial is located at `quickstart/tutorial/updates-
 Let's submit that task:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/updates-init-index.json 
+bin/post-index-task --file quickstart/tutorial/updates-init-index.json --url http://localhost:8081
 ```
 
 We have three initial rows containing an "animal" dimension and "number" metric:
@@ -72,7 +72,7 @@ Note that this task reads input from `quickstart/tutorial/updates-data2.json`, a
 Let's submit that task:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/updates-overwrite-index.json 
+bin/post-index-task --file quickstart/tutorial/updates-overwrite-index.json --url http://localhost:8081
 ```
 
 When Druid finishes loading the new segment from this overwrite task, the "tiger" row now has the value "lion", the "aardvark" row has a different number, and the "giraffe" row has been replaced. It may take a couple of minutes for the changes to take effect:
@@ -98,7 +98,7 @@ The `quickstart/tutorial/updates-append-index.json` task spec has been configure
 Let's submit that task:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/updates-append-index.json 
+bin/post-index-task --file quickstart/tutorial/updates-append-index.json --url http://localhost:8081
 ```
 
 When Druid finishes loading the new segment from this overwrite task, the new rows will have been added to the datasource. Note that roll-up occurred for the "lion" row:
@@ -127,7 +127,7 @@ The `quickstart/tutorial/updates-append-index2.json` task spec reads input from
 Let's submit that task:
 
 ```bash
-bin/post-index-task --file quickstart/tutorial/updates-append-index2.json 
+bin/post-index-task --file quickstart/tutorial/updates-append-index2.json --url http://localhost:8081
 ```
 
 When the new data is loaded, we can see two additional rows after "octopus". Note that the new "bear" row with number 222 has not been rolled up with the existing bear-111 row, because the new data is held in a separate segment.
diff --git a/examples/bin/run-druid b/examples/bin/run-druid
index 82695f6..4db0a2f 100755
--- a/examples/bin/run-druid
+++ b/examples/bin/run-druid
@@ -39,5 +39,5 @@ WHEREAMI="$(cd "$WHEREAMI" && pwd)"
 
 cd "$WHEREAMI/.."
 exec java `cat "$CONFDIR"/"$WHATAMI"/jvm.config | xargs` \
-  -cp "$CONFDIR"/"$WHATAMI":"$CONFDIR"/_common:"$CONFDIR"/_common/hadoop-xml:"$WHEREAMI/../lib/*" \
+  -cp "$CONFDIR"/"$WHATAMI":"$CONFDIR"/_common:"$CONFDIR"/_common/hadoop-xml:"$CONFDIR"/../_common:"$CONFDIR"/../_common/hadoop-xml:"$WHEREAMI/../lib/*" \
   `cat "$CONFDIR"/$WHATAMI/main.config | xargs`
diff --git a/examples/conf/druid/cluster/data/historical/jvm.config b/examples/conf/druid/cluster/data/historical/jvm.config
index 3141abd..891312f 100644
--- a/examples/conf/druid/cluster/data/historical/jvm.config
+++ b/examples/conf/druid/cluster/data/historical/jvm.config
@@ -1,7 +1,7 @@
 -server
 -Xms8g
 -Xmx8g
--XX:MaxDirectMemorySize=14g
+-XX:MaxDirectMemorySize=13g
 -XX:+ExitOnOutOfMemoryError
 -Duser.timezone=UTC
 -Dfile.encoding=UTF-8
diff --git a/examples/conf/druid/cluster/data/historical/runtime.properties b/examples/conf/druid/cluster/data/historical/runtime.properties
index 5ee3a1c..326e6ee 100644
--- a/examples/conf/druid/cluster/data/historical/runtime.properties
+++ b/examples/conf/druid/cluster/data/historical/runtime.properties
@@ -26,7 +26,7 @@ druid.server.http.numThreads=60
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
 druid.processing.numMergeBuffers=4
-druid.processing.numThreads=16
+druid.processing.numThreads=15
 druid.processing.tmpDir=var/druid/processing
 
 # Segment storage
@@ -37,4 +37,4 @@ druid.server.maxSize=300000000000
 druid.historical.cache.useCache=true
 druid.historical.cache.populateCache=true
 druid.cache.type=caffeine
-druid.cache.sizeInBytes=2000000000
+druid.cache.sizeInBytes=256000000
diff --git a/examples/conf/druid/cluster/data/middleManager/runtime.properties b/examples/conf/druid/cluster/data/middleManager/runtime.properties
index 8806fd1..4101ebf 100644
--- a/examples/conf/druid/cluster/data/middleManager/runtime.properties
+++ b/examples/conf/druid/cluster/data/middleManager/runtime.properties
@@ -24,7 +24,7 @@ druid.plaintextPort=8091
 druid.worker.capacity=4
 
 # Task launch parameters
-druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
 druid.indexer.task.baseTaskDir=var/druid/task
 
 # HTTP server threads
@@ -32,7 +32,7 @@ druid.server.http.numThreads=60
 
 # Processing threads and buffers on Peons
 druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=500000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
 druid.indexer.fork.property.druid.processing.numThreads=1
 
 # Hadoop indexing
diff --git a/examples/conf/druid/single-server/medium/coordinator-overlord/jvm.config b/examples/conf/druid/cluster/master/coordinator-overlord/jvm.config
similarity index 93%
copy from examples/conf/druid/single-server/medium/coordinator-overlord/jvm.config
copy to examples/conf/druid/cluster/master/coordinator-overlord/jvm.config
index 38d2e1e..5df7d60 100644
--- a/examples/conf/druid/single-server/medium/coordinator-overlord/jvm.config
+++ b/examples/conf/druid/cluster/master/coordinator-overlord/jvm.config
@@ -1,6 +1,6 @@
 -server
--Xms12g
--Xmx12g
+-Xms15g
+-Xmx15g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/cluster/master/coordinator/main.config b/examples/conf/druid/cluster/master/coordinator-overlord/main.config
similarity index 100%
rename from examples/conf/druid/cluster/master/coordinator/main.config
rename to examples/conf/druid/cluster/master/coordinator-overlord/main.config
diff --git a/examples/conf/druid/cluster/master/coordinator/runtime.properties b/examples/conf/druid/cluster/master/coordinator-overlord/runtime.properties
similarity index 77%
rename from examples/conf/druid/cluster/master/coordinator/runtime.properties
rename to examples/conf/druid/cluster/master/coordinator-overlord/runtime.properties
index 52dd09a..8928cc9 100644
--- a/examples/conf/druid/cluster/master/coordinator/runtime.properties
+++ b/examples/conf/druid/cluster/master/coordinator-overlord/runtime.properties
@@ -22,3 +22,12 @@ druid.plaintextPort=8081
 
 druid.coordinator.startDelay=PT10S
 druid.coordinator.period=PT5S
+
+# Run the overlord service in the coordinator process
+druid.coordinator.asOverlord.enabled=true
+druid.coordinator.asOverlord.overlordService=druid/overlord
+
+druid.indexer.queue.startDelay=PT5S
+
+druid.indexer.runner.type=remote
+druid.indexer.storage.type=metadata
diff --git a/examples/conf/druid/cluster/master/coordinator/jvm.config b/examples/conf/druid/cluster/master/coordinator/jvm.config
deleted file mode 100644
index 084add7..0000000
--- a/examples/conf/druid/cluster/master/coordinator/jvm.config
+++ /dev/null
@@ -1,9 +0,0 @@
--server
--Xms1g
--Xmx1g
--XX:+ExitOnOutOfMemoryError
--Duser.timezone=UTC
--Dfile.encoding=UTF-8
--Djava.io.tmpdir=var/tmp
--Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
--Dderby.stream.error.file=var/druid/derby.log
diff --git a/examples/conf/druid/cluster/master/overlord/jvm.config b/examples/conf/druid/cluster/master/overlord/jvm.config
deleted file mode 100644
index 2bb6641..0000000
--- a/examples/conf/druid/cluster/master/overlord/jvm.config
+++ /dev/null
@@ -1,8 +0,0 @@
--server
--Xms1g
--Xmx1g
--XX:+ExitOnOutOfMemoryError
--Duser.timezone=UTC
--Dfile.encoding=UTF-8
--Djava.io.tmpdir=var/tmp
--Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
diff --git a/examples/conf/druid/cluster/master/overlord/main.config b/examples/conf/druid/cluster/master/overlord/main.config
deleted file mode 100644
index dcf691a..0000000
--- a/examples/conf/druid/cluster/master/overlord/main.config
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.druid.cli.Main server overlord
diff --git a/examples/conf/druid/cluster/master/overlord/runtime.properties b/examples/conf/druid/cluster/master/overlord/runtime.properties
deleted file mode 100644
index 093758c..0000000
--- a/examples/conf/druid/cluster/master/overlord/runtime.properties
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-druid.service=druid/overlord
-druid.plaintextPort=8090
-
-druid.indexer.queue.startDelay=PT5S
-
-druid.indexer.runner.type=remote
-druid.indexer.storage.type=metadata
diff --git a/examples/conf/druid/cluster/query/broker/jvm.config b/examples/conf/druid/cluster/query/broker/jvm.config
index a66f751..442a7b2 100644
--- a/examples/conf/druid/cluster/query/broker/jvm.config
+++ b/examples/conf/druid/cluster/query/broker/jvm.config
@@ -1,7 +1,7 @@
 -server
--Xms24g
--Xmx24g
--XX:MaxDirectMemorySize=12g
+-Xms12g
+-Xmx12g
+-XX:MaxDirectMemorySize=6g
 -XX:+ExitOnOutOfMemoryError
 -Duser.timezone=UTC
 -Dfile.encoding=UTF-8
diff --git a/examples/conf/druid/cluster/query/broker/runtime.properties b/examples/conf/druid/cluster/query/broker/runtime.properties
index 6d4b369..6873025 100644
--- a/examples/conf/druid/cluster/query/broker/runtime.properties
+++ b/examples/conf/druid/cluster/query/broker/runtime.properties
@@ -25,11 +25,11 @@ druid.server.http.numThreads=60
 
 # HTTP client settings
 druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=5000000
+druid.broker.http.maxQueuedBytes=10000000
 
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
-druid.processing.numMergeBuffers=16
+druid.processing.numMergeBuffers=6
 druid.processing.numThreads=1
 druid.processing.tmpDir=var/druid/processing
 
diff --git a/examples/conf/druid/single-server/large/broker/jvm.config b/examples/conf/druid/single-server/large/broker/jvm.config
index da8c305..6c43c24 100644
--- a/examples/conf/druid/single-server/large/broker/jvm.config
+++ b/examples/conf/druid/single-server/large/broker/jvm.config
@@ -1,7 +1,7 @@
 -server
--Xms16g
--Xmx16g
--XX:MaxDirectMemorySize=8g
+-Xms12g
+-Xmx12g
+-XX:MaxDirectMemorySize=11g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/single-server/large/broker/runtime.properties b/examples/conf/druid/single-server/large/broker/runtime.properties
index a38e324..d32929c 100644
--- a/examples/conf/druid/single-server/large/broker/runtime.properties
+++ b/examples/conf/druid/single-server/large/broker/runtime.properties
@@ -25,11 +25,11 @@ druid.server.http.numThreads=60
 
 # HTTP client settings
 druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=5000000
+druid.broker.http.maxQueuedBytes=10000000
 
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
-druid.processing.numMergeBuffers=8
+druid.processing.numMergeBuffers=16
 druid.processing.numThreads=1
 druid.processing.tmpDir=var/druid/processing
 
diff --git a/examples/conf/druid/single-server/large/coordinator-overlord/jvm.config b/examples/conf/druid/single-server/large/coordinator-overlord/jvm.config
index 04b4729..5df7d60 100644
--- a/examples/conf/druid/single-server/large/coordinator-overlord/jvm.config
+++ b/examples/conf/druid/single-server/large/coordinator-overlord/jvm.config
@@ -1,6 +1,6 @@
 -server
--Xms24g
--Xmx24g
+-Xms15g
+-Xmx15g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/single-server/large/historical/jvm.config b/examples/conf/druid/single-server/large/historical/jvm.config
index bd616d1..16e1f5d 100644
--- a/examples/conf/druid/single-server/large/historical/jvm.config
+++ b/examples/conf/druid/single-server/large/historical/jvm.config
@@ -1,7 +1,7 @@
 -server
 -Xms16g
 -Xmx16g
--XX:MaxDirectMemorySize=32g
+-XX:MaxDirectMemorySize=25g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/single-server/large/historical/runtime.properties b/examples/conf/druid/single-server/large/historical/runtime.properties
index dcb0004..540fba6 100644
--- a/examples/conf/druid/single-server/large/historical/runtime.properties
+++ b/examples/conf/druid/single-server/large/historical/runtime.properties
@@ -26,7 +26,7 @@ druid.server.http.numThreads=60
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
 druid.processing.numMergeBuffers=8
-druid.processing.numThreads=32
+druid.processing.numThreads=31
 druid.processing.tmpDir=var/druid/processing
 
 # Segment storage
@@ -37,4 +37,4 @@ druid.server.maxSize=300000000000
 druid.historical.cache.useCache=true
 druid.historical.cache.populateCache=true
 druid.cache.type=caffeine
-druid.cache.sizeInBytes=1000000000
+druid.cache.sizeInBytes=512000000
diff --git a/examples/conf/druid/single-server/large/middleManager/runtime.properties b/examples/conf/druid/single-server/large/middleManager/runtime.properties
index 54b462f..0583b52 100644
--- a/examples/conf/druid/single-server/large/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/large/middleManager/runtime.properties
@@ -24,7 +24,7 @@ druid.plaintextPort=8091
 druid.worker.capacity=8
 
 # Task launch parameters
-druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
 druid.indexer.task.baseTaskDir=var/druid/task
 
 # HTTP server threads
diff --git a/examples/conf/druid/single-server/medium/broker/jvm.config b/examples/conf/druid/single-server/medium/broker/jvm.config
index bdb2411..a4bf3d9 100644
--- a/examples/conf/druid/single-server/medium/broker/jvm.config
+++ b/examples/conf/druid/single-server/medium/broker/jvm.config
@@ -1,7 +1,7 @@
 -server
 -Xms8g
 -Xmx8g
--XX:MaxDirectMemorySize=16g
+-XX:MaxDirectMemorySize=5g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/single-server/medium/broker/runtime.properties b/examples/conf/druid/single-server/medium/broker/runtime.properties
index 17e8814..5681b8a 100644
--- a/examples/conf/druid/single-server/medium/broker/runtime.properties
+++ b/examples/conf/druid/single-server/medium/broker/runtime.properties
@@ -25,7 +25,7 @@ druid.server.http.numThreads=60
 
 # HTTP client settings
 druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=5000000
+druid.broker.http.maxQueuedBytes=10000000
 
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
diff --git a/examples/conf/druid/single-server/medium/coordinator-overlord/jvm.config b/examples/conf/druid/single-server/medium/coordinator-overlord/jvm.config
index 38d2e1e..dbddd50 100644
--- a/examples/conf/druid/single-server/medium/coordinator-overlord/jvm.config
+++ b/examples/conf/druid/single-server/medium/coordinator-overlord/jvm.config
@@ -1,6 +1,6 @@
 -server
--Xms12g
--Xmx12g
+-Xms9g
+-Xmx9g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/single-server/medium/historical/runtime.properties b/examples/conf/druid/single-server/medium/historical/runtime.properties
index 1a70a71..326e6ee 100644
--- a/examples/conf/druid/single-server/medium/historical/runtime.properties
+++ b/examples/conf/druid/single-server/medium/historical/runtime.properties
@@ -26,7 +26,7 @@ druid.server.http.numThreads=60
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
 druid.processing.numMergeBuffers=4
-druid.processing.numThreads=16
+druid.processing.numThreads=15
 druid.processing.tmpDir=var/druid/processing
 
 # Segment storage
diff --git a/examples/conf/druid/single-server/medium/middleManager/runtime.properties b/examples/conf/druid/single-server/medium/middleManager/runtime.properties
index 55d9f1c..4101ebf 100644
--- a/examples/conf/druid/single-server/medium/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/medium/middleManager/runtime.properties
@@ -24,7 +24,7 @@ druid.plaintextPort=8091
 druid.worker.capacity=4
 
 # Task launch parameters
-druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
 druid.indexer.task.baseTaskDir=var/druid/task
 
 # HTTP server threads
diff --git a/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties b/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties
index 8be6e56..280787b 100644
--- a/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/micro-quickstart/middleManager/runtime.properties
@@ -24,7 +24,7 @@ druid.plaintextPort=8091
 druid.worker.capacity=2
 
 # Task launch parameters
-druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
 druid.indexer.task.baseTaskDir=var/druid/task
 
 # HTTP server threads
diff --git a/examples/conf/druid/single-server/small/coordinator-overlord/jvm.config b/examples/conf/druid/single-server/small/coordinator-overlord/jvm.config
index c853ea8..3417668 100644
--- a/examples/conf/druid/single-server/small/coordinator-overlord/jvm.config
+++ b/examples/conf/druid/single-server/small/coordinator-overlord/jvm.config
@@ -1,6 +1,6 @@
 -server
--Xms6g
--Xmx6g
+-Xms4500m
+-Xmx4500m
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/single-server/small/historical/runtime.properties b/examples/conf/druid/single-server/small/historical/runtime.properties
index 144a029..6cfc704 100644
--- a/examples/conf/druid/single-server/small/historical/runtime.properties
+++ b/examples/conf/druid/single-server/small/historical/runtime.properties
@@ -26,7 +26,7 @@ druid.server.http.numThreads=50
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
 druid.processing.numMergeBuffers=2
-druid.processing.numThreads=8
+druid.processing.numThreads=7
 druid.processing.tmpDir=var/druid/processing
 
 # Segment storage
diff --git a/examples/conf/druid/single-server/small/middleManager/runtime.properties b/examples/conf/druid/single-server/small/middleManager/runtime.properties
index 1665e46..f9a8bae 100644
--- a/examples/conf/druid/single-server/small/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/small/middleManager/runtime.properties
@@ -24,7 +24,7 @@ druid.plaintextPort=8091
 druid.worker.capacity=3
 
 # Task launch parameters
-druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
 druid.indexer.task.baseTaskDir=var/druid/task
 
 # HTTP server threads
diff --git a/examples/conf/druid/single-server/xlarge/broker/jvm.config b/examples/conf/druid/single-server/xlarge/broker/jvm.config
index a8844b2..f83ad0e 100644
--- a/examples/conf/druid/single-server/xlarge/broker/jvm.config
+++ b/examples/conf/druid/single-server/xlarge/broker/jvm.config
@@ -1,6 +1,6 @@
 -server
--Xms24g
--Xmx24g
+-Xms16g
+-Xmx16g
 -XX:MaxDirectMemorySize=12g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
diff --git a/examples/conf/druid/single-server/xlarge/broker/runtime.properties b/examples/conf/druid/single-server/xlarge/broker/runtime.properties
index 6d4b369..d32929c 100644
--- a/examples/conf/druid/single-server/xlarge/broker/runtime.properties
+++ b/examples/conf/druid/single-server/xlarge/broker/runtime.properties
@@ -25,7 +25,7 @@ druid.server.http.numThreads=60
 
 # HTTP client settings
 druid.broker.http.numConnections=50
-druid.broker.http.maxQueuedBytes=5000000
+druid.broker.http.maxQueuedBytes=10000000
 
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
diff --git a/examples/conf/druid/single-server/xlarge/coordinator-overlord/jvm.config b/examples/conf/druid/single-server/xlarge/coordinator-overlord/jvm.config
index 04b4729..f3ca0fd 100644
--- a/examples/conf/druid/single-server/xlarge/coordinator-overlord/jvm.config
+++ b/examples/conf/druid/single-server/xlarge/coordinator-overlord/jvm.config
@@ -1,6 +1,6 @@
 -server
--Xms24g
--Xmx24g
+-Xms18g
+-Xmx18g
 -XX:+ExitOnOutOfMemoryError
 -XX:+UseG1GC
 -Duser.timezone=UTC
diff --git a/examples/conf/druid/single-server/xlarge/historical/runtime.properties b/examples/conf/druid/single-server/xlarge/historical/runtime.properties
index 11856c5..c322fda 100644
--- a/examples/conf/druid/single-server/xlarge/historical/runtime.properties
+++ b/examples/conf/druid/single-server/xlarge/historical/runtime.properties
@@ -26,7 +26,7 @@ druid.server.http.numThreads=60
 # Processing threads and buffers
 druid.processing.buffer.sizeBytes=500000000
 druid.processing.numMergeBuffers=16
-druid.processing.numThreads=64
+druid.processing.numThreads=63
 druid.processing.tmpDir=var/druid/processing
 
 # Segment storage
diff --git a/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties b/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties
index 889d20d..28732de 100644
--- a/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties
+++ b/examples/conf/druid/single-server/xlarge/middleManager/runtime.properties
@@ -24,7 +24,7 @@ druid.plaintextPort=8091
 druid.worker.capacity=16
 
 # Task launch parameters
-druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -XX:MaxDirectMemorySize=1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
 druid.indexer.task.baseTaskDir=var/druid/task
 
 # HTTP server threads
@@ -32,7 +32,7 @@ druid.server.http.numThreads=60
 
 # Processing threads and buffers on Peons
 druid.indexer.fork.property.druid.processing.numMergeBuffers=2
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=500000000
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
 druid.indexer.fork.property.druid.processing.numThreads=1
 
 # Hadoop indexing
diff --git a/examples/conf/supervise/cluster/data.conf b/examples/conf/supervise/cluster/data.conf
index 3047288..32f61d9 100644
--- a/examples/conf/supervise/cluster/data.conf
+++ b/examples/conf/supervise/cluster/data.conf
@@ -1,11 +1,7 @@
 :verify bin/verify-java
-:verify bin/verify-version-check
 
-historical bin/run-druid historical conf/druid/cluster/data/historical
-middleManager bin/run-druid middleManager conf/druid/cluster/data/middleManager
+historical bin/run-druid historical conf/druid/cluster/data
+middleManager bin/run-druid middleManager conf/druid/cluster/data
 
 # Uncomment to use Tranquility Server
-#!p95 tranquility-server bin/tranquility server -configFile conf/tranquility/server.json
-
-# Uncomment to use Tranquility Kafka
-#!p95 tranquility-kafka bin/tranquility kafka -configFile conf/tranquility/kafka.json
+#!p95 tranquility-server tranquility/bin/tranquility server -configFile conf/tranquility/server.json -Ddruid.extensions.loadList=[]
diff --git a/examples/conf/supervise/cluster/master-no-zk.conf b/examples/conf/supervise/cluster/master-no-zk.conf
index 8b22448..2730387 100644
--- a/examples/conf/supervise/cluster/master-no-zk.conf
+++ b/examples/conf/supervise/cluster/master-no-zk.conf
@@ -1,5 +1,3 @@
 :verify bin/verify-java
-:verify bin/verify-version-check
 
-coordinator bin/run-druid coordinator conf/druid/cluster/data/coordinator
-!p80 overlord bin/run-druid overlord conf/druid/cluster/data/overlord
+coordinator-overlord bin/run-druid coordinator-overlord conf/druid/cluster/master
diff --git a/examples/conf/supervise/cluster/master-with-zk.conf b/examples/conf/supervise/cluster/master-with-zk.conf
index 8eeea0c..2399827 100644
--- a/examples/conf/supervise/cluster/master-with-zk.conf
+++ b/examples/conf/supervise/cluster/master-with-zk.conf
@@ -1,6 +1,4 @@
 :verify bin/verify-java
-:verify bin/verify-version-check
 
 !p10 zk bin/run-zk conf
-coordinator bin/run-druid coordinator conf/druid/cluster/data/coordinator
-!p80 overlord bin/run-druid overlord conf/druid/cluster/data/overlord
+coordinator-overlord bin/run-druid coordinator-overlord conf/druid/cluster/master
diff --git a/examples/conf/supervise/cluster/query.conf b/examples/conf/supervise/cluster/query.conf
index cd6ec37..ead75fd 100644
--- a/examples/conf/supervise/cluster/query.conf
+++ b/examples/conf/supervise/cluster/query.conf
@@ -1,5 +1,4 @@
 :verify bin/verify-java
-:verify bin/verify-version-check
 
-broker bin/run-druid broker conf/druid/cluster/data/broker
-router bin/run-druid router conf/druid/cluster/data/router
+broker bin/run-druid broker conf/druid/cluster/query
+router bin/run-druid router conf/druid/cluster/query


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@druid.apache.org
For additional commands, e-mail: commits-help@druid.apache.org