Posted to commits@druid.apache.org by te...@apache.org on 2023/05/19 16:42:37 UTC

[druid] branch master updated: Update Ingestion section (#14023)

This is an automated email from the ASF dual-hosted git repository.

techdocsmith pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/master by this push:
     new 269137c682 Update Ingestion section (#14023)
269137c682 is described below

commit 269137c6828c5a2a4f7308c33d239c2c019c87b7
Author: Katya Macedo <38...@users.noreply.github.com>
AuthorDate: Fri May 19 11:42:27 2023 -0500

    Update Ingestion section (#14023)
    
    Co-authored-by: Charles Smith <te...@gmail.com>
    Co-authored-by: Victoria Lim <vt...@users.noreply.github.com>
    Co-authored-by: Victoria Lim <li...@gmail.com>
---
 README.md                                          |   2 +-
 .../{operations => api-reference}/api-reference.md |  88 ++++-----
 docs/{querying => api-reference}/sql-api.md        |  29 ++-
 .../api.md => api-reference/sql-ingestion-api.md}  |  18 +-
 docs/{querying => api-reference}/sql-jdbc.md       |  14 +-
 docs/{development => configuration}/extensions.md  |  18 +-
 docs/configuration/index.md                        |  19 +-
 docs/data-management/automatic-compaction.md       |  10 +-
 docs/data-management/compaction.md                 |   2 +-
 docs/data-management/delete.md                     |   4 +-
 docs/data-management/update.md                     |   2 +-
 docs/design/architecture.md                        |   8 +-
 docs/design/broker.md                              |   2 +-
 docs/design/coordinator.md                         |   4 +-
 docs/{dependencies => design}/deep-storage.md      |   2 +-
 docs/design/extensions-contrib/dropwizard.md       |   2 +-
 docs/design/historical.md                          |   2 +-
 docs/design/indexer.md                             |   2 +-
 docs/design/indexing-service.md                    |   2 +-
 docs/{dependencies => design}/metadata-storage.md  |   0
 docs/design/middlemanager.md                       |   2 +-
 docs/design/overlord.md                            |   2 +-
 docs/design/peons.md                               |   2 +-
 docs/design/router.md                              |   2 +-
 docs/{dependencies => design}/zookeeper.md         |   0
 docs/development/experimental-features.md          |   2 +-
 .../extensions-contrib/aliyun-oss-extensions.md    |   2 +-
 .../extensions-contrib/ambari-metrics-emitter.md   |   2 +-
 docs/development/extensions-contrib/cassandra.md   |   2 +-
 docs/development/extensions-contrib/cloudfiles.md  |   2 +-
 .../extensions-contrib/compressed-big-decimal.md   |   2 +-
 .../extensions-contrib/distinctcount.md            |   2 +-
 .../extensions-contrib/gce-extensions.md           |   2 +-
 docs/development/extensions-contrib/graphite.md    |   2 +-
 docs/development/extensions-contrib/influx.md      |   2 +-
 .../extensions-contrib/influxdb-emitter.md         |   2 +-
 docs/development/extensions-contrib/k8s-jobs.md    |   2 +-
 .../extensions-contrib/kafka-emitter.md            |   2 +-
 .../extensions-contrib/momentsketch-quantiles.md   |   2 +-
 .../extensions-contrib/moving-average-query.md     |   2 +-
 .../extensions-contrib/opentsdb-emitter.md         |   2 +-
 docs/development/extensions-contrib/prometheus.md  |   2 +-
 docs/development/extensions-contrib/redis-cache.md |   4 +-
 docs/development/extensions-contrib/sqlserver.md   |   2 +-
 docs/development/extensions-contrib/statsd.md      |   2 +-
 .../extensions-contrib/tdigestsketch-quantiles.md  |   2 +-
 docs/development/extensions-contrib/thrift.md      |   2 +-
 .../development/extensions-contrib/time-min-max.md |   2 +-
 .../extensions-core/approximate-histograms.md      |   2 +-
 docs/development/extensions-core/avro.md           |   2 +-
 docs/development/extensions-core/azure.md          |   2 +-
 docs/development/extensions-core/bloom-filter.md   |   4 +-
 .../extensions-core/datasketches-extension.md      |   2 +-
 .../extensions-core/datasketches-hll.md            |   2 +-
 .../extensions-core/datasketches-kll.md            |   2 +-
 .../extensions-core/datasketches-quantiles.md      |   2 +-
 .../extensions-core/datasketches-theta.md          |   2 +-
 .../extensions-core/datasketches-tuple.md          |   2 +-
 docs/development/extensions-core/druid-aws-rds.md  |   2 +-
 .../extensions-core/druid-basic-security.md        |   4 +-
 docs/development/extensions-core/druid-kerberos.md |   4 +-
 docs/development/extensions-core/druid-lookups.md  |   2 +-
 .../extensions-core/druid-ranger-security.md       |   6 +-
 docs/development/extensions-core/google.md         |   4 +-
 docs/development/extensions-core/hdfs.md           |  10 +-
 .../extensions-core/kafka-extraction-namespace.md  |   2 +-
 .../development/extensions-core/kafka-ingestion.md |   2 +-
 .../extensions-core/kafka-supervisor-operations.md |   2 +-
 .../extensions-core/kafka-supervisor-reference.md  |   4 +-
 .../extensions-core/kinesis-ingestion.md           |   6 +-
 docs/development/extensions-core/kubernetes.md     |   2 +-
 .../extensions-core/lookups-cached-global.md       |   4 +-
 docs/development/extensions-core/mysql.md          |   2 +-
 docs/development/extensions-core/orc.md            |   2 +-
 docs/development/extensions-core/parquet.md        |   2 +-
 docs/development/extensions-core/postgresql.md     |   4 +-
 docs/development/extensions-core/protobuf.md       |   2 +-
 docs/development/extensions-core/s3.md             |   8 +-
 docs/development/extensions-core/stats.md          |   2 +-
 docs/ingestion/data-formats.md                     |   7 +-
 docs/ingestion/faq.md                              |  12 +-
 docs/ingestion/hadoop.md                           |   2 +-
 docs/ingestion/index.md                            |  11 +-
 docs/ingestion/ingestion-spec.md                   |  18 +-
 ...tive-batch-input-source.md => input-sources.md} |   6 +-
 docs/ingestion/native-batch-firehose.md            |   4 +-
 docs/ingestion/native-batch-simple-task.md         |   4 +-
 docs/ingestion/native-batch.md                     |  39 ++--
 docs/ingestion/rollup.md                           |   4 +-
 docs/ingestion/schema-design.md                    |  12 +-
 docs/ingestion/{data-model.md => schema-model.md}  |   6 +-
 docs/ingestion/tasks.md                            |  25 +--
 docs/multi-stage-query/concepts.md                 |   8 +-
 docs/multi-stage-query/index.md                    |   4 +-
 docs/multi-stage-query/reference.md                |   2 +-
 docs/multi-stage-query/security.md                 |   3 +-
 docs/{design => operations}/auth.md                |   0
 docs/operations/clean-metadata-store.md            |   4 +-
 docs/operations/getting-started.md                 |   2 +-
 .../migrate-from-firehose-ingestion.md             |  12 +-
 docs/operations/pull-deps.md                       |   2 +-
 docs/operations/rule-configuration.md              |   4 +-
 docs/operations/security-overview.md               |   2 +-
 docs/operations/security-user-auth.md              |   4 +-
 docs/operations/tls-support.md                     |   4 +-
 docs/querying/caching.md                           |   8 +-
 docs/querying/datasource.md                        |   2 +-
 docs/querying/filters.md                           |   2 +-
 docs/{development => querying}/geo.md              |   0
 docs/{misc => querying}/math-expr.md               |   0
 docs/querying/nested-columns.md                    |   2 +-
 docs/querying/post-aggregations.md                 |   2 +-
 docs/querying/query-context.md                     |  12 +-
 docs/querying/querying.md                          |   2 +-
 docs/querying/sql-data-types.md                    |   2 +-
 docs/querying/sql-query-context.md                 |   4 +-
 docs/querying/sql-translation.md                   |   2 +-
 docs/querying/sql.md                               |   8 +-
 docs/querying/using-caching.md                     |   2 +-
 docs/querying/virtual-columns.md                   |   4 +-
 docs/tutorials/cluster.md                          |   3 +-
 docs/tutorials/docker.md                           |   3 +-
 docs/tutorials/index.md                            |   3 +-
 docs/tutorials/tutorial-batch-hadoop.md            |   4 +-
 docs/tutorials/tutorial-batch-native.md            |   5 +-
 docs/tutorials/tutorial-batch.md                   |   2 +-
 docs/tutorials/tutorial-compaction.md              |   4 +-
 docs/tutorials/tutorial-ingestion-spec.md          |   4 +-
 docs/tutorials/tutorial-jdbc.md                    |   8 +-
 docs/tutorials/tutorial-jupyter-index.md           |   3 +-
 docs/tutorials/tutorial-kafka.md                   |   6 +-
 docs/tutorials/tutorial-kerberos-hadoop.md         |   4 +-
 docs/tutorials/tutorial-msq-convert-spec.md        |   4 +-
 docs/tutorials/tutorial-msq-extern.md              |   4 +-
 docs/tutorials/tutorial-query.md                   |  12 +-
 docs/tutorials/tutorial-retention.md               |   6 +-
 docs/tutorials/tutorial-rollup.md                  |  14 +-
 docs/tutorials/tutorial-sql-query-view.md          |   6 +-
 docs/tutorials/tutorial-transform-spec.md          |   6 +-
 docs/tutorials/tutorial-update-data.md             |  10 +-
 .../druidapi/druidapi/datasource.py                |   2 +-
 .../jupyter-notebooks/druidapi/druidapi/status.py  |   2 +-
 .../jupyter-notebooks/druidapi/druidapi/tasks.py   |   2 +-
 .../java/org/apache/druid/cli/CliCoordinator.java  |   2 +-
 .../src/druid-models/input-source/input-source.tsx |   2 +-
 web-console/src/links.ts                           |   2 +-
 .../schema-step/schema-step.tsx                    |   2 +-
 website/.spelling                                  | 212 +++++++--------------
 website/redirects.json                             |  48 +++--
 website/sidebars.json                              |  89 ++++-----
 150 files changed, 529 insertions(+), 582 deletions(-)

diff --git a/README.md b/README.md
index c23f11d224..5f516e650f 100644
--- a/README.md
+++ b/README.md
@@ -85,7 +85,7 @@ Use the built-in query workbench to prototype [DruidSQL](https://druid.apache.or
 
 ### Documentation
 
-See the [latest documentation](https://druid.apache.org/docs/latest/) for the documentation for the current official release.  If you need information on a previous release, you can browse [previous releases documentation](https://druid.apache.org/docs/).
+See the [latest documentation](https://druid.apache.org/docs/latest/) for the documentation for the current official release. If you need information on a previous release, you can browse [previous releases documentation](https://druid.apache.org/docs/).
 
 Make documentation and tutorials updates in [`/docs`](https://github.com/apache/druid/tree/master/docs) using [MarkDown](https://www.markdownguide.org/) and contribute them using a pull request.
 
diff --git a/docs/operations/api-reference.md b/docs/api-reference/api-reference.md
similarity index 92%
rename from docs/operations/api-reference.md
rename to docs/api-reference/api-reference.md
index af390e0774..9b762c0818 100644
--- a/docs/operations/api-reference.md
+++ b/docs/api-reference/api-reference.md
@@ -1,6 +1,7 @@
 ---
 id: api-reference
-title: "API reference"
+title: HTTP API endpoints reference
+sidebar_label: API endpoints reference
 ---
 
 <!--
@@ -23,21 +24,21 @@ title: "API reference"
   -->
 
 
-This page documents all of the API endpoints for each Druid service type.
+This topic documents all of the API endpoints for each Druid service type.
 
 ## Common
 
-The following endpoints are supported by all processes.
+All processes support the following endpoints.
 
 ### Process information
 
 `GET /status`
 
-Returns the Druid version, loaded extensions, memory used, total memory and other useful information about the process.
+Returns the Druid version, loaded extensions, memory used, total memory, and other useful information about the process.
 
 `GET /status/health`
 
-An endpoint that always returns a boolean "true" value with a 200 OK response, useful for automated health checks.
+Always returns a boolean `true` value with a 200 OK response, useful for automated health checks.
 
 `GET /status/properties`
 
@@ -77,7 +78,7 @@ Returns the current leader Coordinator of the cluster.
 
 `GET /druid/coordinator/v1/isLeader`
 
-Returns a JSON object with field "leader", either true or false, indicating if this server is the current leader
+Returns a JSON object with a `leader` parameter, either true or false, indicating if this server is the current leader
 Coordinator of the cluster. In addition, returns HTTP 200 if the server is the current leader and HTTP 404 if not.
 This is suitable for use as a load balancer status check if you only want the active leader to be considered in-service
 at the load balancer.
@@ -119,11 +120,10 @@ Returns the number of segments to load and drop, as well as the total segment lo
 
 Returns the serialized JSON of segments to load and drop for each Historical process.
 
-
 #### Segment loading by datasource
 
-Note that all _interval_ query parameters are ISO 8601 strings (e.g., 2016-06-27/2016-06-28).
-Also note that these APIs only guarantees that the segments are available at the time of the call. 
+Note that all _interval_ query parameters are ISO 8601 strings&mdash;for example, 2016-06-27/2016-06-28.
+Also note that these APIs only guarantee that the segments are available at the time of the call.
 Segments can still become missing because of historical process failures or any other reasons afterward.
 
 `GET /druid/coordinator/v1/datasources/{dataSourceName}/loadstatus?forceMetadataRefresh={boolean}&interval={myInterval}`
@@ -144,7 +144,7 @@ over the given interval (or last 2 weeks if interval is not given). This does no
 (Note: `forceMetadataRefresh=true` refreshes Coordinator's metadata cache of all datasources. This can be a heavy operation in terms 
 of the load on the metadata store but can be necessary to make sure that we verify all the latest segments' load status)
 * Setting `forceMetadataRefresh` to false will use the metadata cached on the coordinator from the last force/periodic refresh. 
-If no used segments are found for the given inputs, this API returns `204 No Content` 
+If no used segments are found for the given inputs, this API returns `204 No Content`
 
 `GET /druid/coordinator/v1/datasources/{dataSourceName}/loadstatus?full&forceMetadataRefresh={boolean}&interval={myInterval}`
 
@@ -216,18 +216,17 @@ segment is unused, or is unknown, a 404 response is returned.
 
 `GET /druid/coordinator/v1/metadata/datasources/{dataSourceName}/segments`
 
-Returns a list of all segments, overlapping with any of given intervals,  for a datasource as stored in the metadata store. Request body is array of string IS0 8601 intervals like [interval1, interval2,...] for example ["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"]
+Returns a list of all segments, overlapping with any of given intervals, for a datasource as stored in the metadata store. Request body is array of string ISO 8601 intervals like `[interval1, interval2,...]`&mdash;for example, `["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"]`.
 
 `GET /druid/coordinator/v1/metadata/datasources/{dataSourceName}/segments?full`
 
-Returns a list of all segments, overlapping with any of given intervals, for a datasource with the full segment metadata as stored in the metadata store. Request body is array of string ISO 8601 intervals like [interval1, interval2,...] for example ["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"]
+Returns a list of all segments, overlapping with any of given intervals, for a datasource with the full segment metadata as stored in the metadata store. Request body is array of string ISO 8601 intervals like `[interval1, interval2,...]`&mdash;for example, `["2012-01-01T00:00:00.000/2012-01-03T00:00:00.000", "2012-01-05T00:00:00.000/2012-01-07T00:00:00.000"]`.
 
 <a name="coordinator-datasources"></a>
 
 #### Datasources
 
-Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
-(e.g., 2016-06-27_2016-06-28).
+Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`&mdash;for example, `2016-06-27_2016-06-28`.
 
 `GET /druid/coordinator/v1/datasources`
 
@@ -235,7 +234,7 @@ Returns a list of datasource names found in the cluster as seen by the coordinat
 
 `GET /druid/coordinator/v1/datasources?simple`
 
-Returns a list of JSON objects containing the name and properties of datasources found in the cluster.  Properties include segment count, total segment byte size, replicated total segment byte size, minTime, and maxTime.
+Returns a list of JSON objects containing the name and properties of datasources found in the cluster. Properties include segment count, total segment byte size, replicated total segment byte size, minTime, and maxTime.
 
 `GET /druid/coordinator/v1/datasources?full`
 
@@ -247,7 +246,7 @@ Returns a JSON object containing the name and properties of a datasource. Proper
 
 `GET /druid/coordinator/v1/datasources/{dataSourceName}?full`
 
-Returns full metadata for a datasource .
+Returns full metadata for a datasource.
 
 `GET /druid/coordinator/v1/datasources/{dataSourceName}/intervals`
 
@@ -294,6 +293,7 @@ Returns full segment metadata for a specific segment in the cluster.
 Return the tiers that a datasource exists in.
 
 #### Note for Coordinator's POST and DELETE APIs
+
 While segments may be enabled by issuing POST requests for the datasources, the Coordinator may again disable segments if they match any configured [drop rules](../operations/rule-configuration.md#drop-rules). Even if segments are enabled by these APIs, you must configure a [load rule](../operations/rule-configuration.md#load-rules) to load them onto Historical processes. If an indexing or kill task runs at the same time these APIs are invoked, the behavior is undefined. Some segments mi [...]
 
 > Avoid using indexing or kill tasks and these APIs at the same time for the same datasource and time chunk.
@@ -316,8 +316,8 @@ result of this API call.
 
 Marks segments (un)used for a datasource by interval or set of segment Ids. When marking used only segments that are not overshadowed will be updated.
 
-The request payload contains the interval or set of segment Ids to be marked unused.
-Either interval or segment ids should be provided, if both or none are provided in the payload, the API would throw an error (400 BAD REQUEST).
+The request payload contains the interval or set of segment IDs to be marked unused.
+Either the interval or segment IDs should be provided. If both or neither are provided in the payload, the API throws an error (400 BAD REQUEST).
 
 Interval specifies the start and end times as IS0 8601 strings. `interval=(start/end)` where start and end both are inclusive and only the segments completely contained within the specified interval will be disabled, partially overlapping segments will not be affected.
 
@@ -325,9 +325,8 @@ JSON Request Payload:
 
  |Key|Description|Example|
 |----------|-------------|---------|
-|`interval`|The interval for which to mark segments unused|"2015-09-12T03:00:00.000Z/2015-09-12T05:00:00.000Z"|
-|`segmentIds`|Set of segment Ids to be marked unused|["segmentId1", "segmentId2"]|
-
+|`interval`|The interval for which to mark segments unused|`"2015-09-12T03:00:00.000Z/2015-09-12T05:00:00.000Z"`|
+|`segmentIds`|Set of segment IDs to be marked unused|`["segmentId1", "segmentId2"]`|
 
 `DELETE /druid/coordinator/v1/datasources/{dataSourceName}`
 
@@ -348,8 +347,7 @@ result of this API call.
 
 #### Retention rules
 
-Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
-(e.g., 2016-06-27_2016-06-28).
+Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/` as in `2016-06-27_2016-06-28`.
 
 `GET /druid/coordinator/v1/rules`
 
@@ -365,7 +363,7 @@ Returns all rules for a specified datasource and includes default datasource.
 
 `GET /druid/coordinator/v1/rules/history?interval=<interval>`
 
-Returns audit history of rules for all datasources. default value of interval can be specified by setting `druid.audit.manager.auditHistoryMillis` (1 week if not configured) in Coordinator runtime.properties
+Returns audit history of rules for all datasources. Default value of interval can be specified by setting `druid.audit.manager.auditHistoryMillis` (1 week if not configured) in Coordinator `runtime.properties`.
 
 `GET /druid/coordinator/v1/rules/history?count=<n>`
 
@@ -373,7 +371,7 @@ Returns last `n` entries of audit history of rules for all datasources.
 
 `GET /druid/coordinator/v1/rules/{dataSourceName}/history?interval=<interval>`
 
-Returns audit history of rules for a specified datasource. default value of interval can be specified by setting `druid.audit.manager.auditHistoryMillis` (1 week if not configured) in Coordinator runtime.properties
+Returns audit history of rules for a specified datasource. Default value of interval can be specified by setting `druid.audit.manager.auditHistoryMillis` (1 week if not configured) in Coordinator `runtime.properties`.
 
 `GET /druid/coordinator/v1/rules/{dataSourceName}/history?count=<n>`
 
@@ -387,13 +385,12 @@ Optional Header Parameters for auditing the config change can also be specified.
 
 |Header Param Name| Description | Default |
 |----------|-------------|---------|
-|`X-Druid-Author`| author making the config change|""|
-|`X-Druid-Comment`| comment describing the change being done|""|
+|`X-Druid-Author`| Author making the config change|`""`|
+|`X-Druid-Comment`| Comment describing the change being done|`""`|
 
 #### Intervals
 
-Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
-(e.g., 2016-06-27_2016-06-28).
+Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/` as in `2016-06-27_2016-06-28`.
 
 `GET /druid/coordinator/v1/intervals`
 
@@ -401,22 +398,22 @@ Returns all intervals for all datasources with total size and count.
 
 `GET /druid/coordinator/v1/intervals/{interval}`
 
-Returns aggregated total size and count for all intervals that intersect given isointerval.
+Returns aggregated total size and count for all intervals that intersect given ISO interval.
 
 `GET /druid/coordinator/v1/intervals/{interval}?simple`
 
-Returns total size and count for each interval within given isointerval.
+Returns total size and count for each interval within given ISO interval.
 
 `GET /druid/coordinator/v1/intervals/{interval}?full`
 
-Returns total size and count for each datasource for each interval within given isointerval.
+Returns total size and count for each datasource for each interval within given ISO interval.
 
 #### Dynamic configuration
 
 See [Coordinator Dynamic Configuration](../configuration/index.md#dynamic-configuration) for details.
 
 Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
-(e.g., 2016-06-27_2016-06-28).
+as in `2016-06-27_2016-06-28`.
 
 `GET /druid/coordinator/v1/config`
 
@@ -437,11 +434,10 @@ Update overlord dynamic worker configuration.
 
 Returns the total size of segments awaiting compaction for the given dataSource. The specified dataSource must have [automatic compaction](../data-management/automatic-compaction.md) enabled.
 
-
-
 `GET /druid/coordinator/v1/compaction/status`
 
 Returns the status and statistics from the auto-compaction run of all dataSources which have auto-compaction enabled in the latest run. The response payload includes a list of `latestStatus` objects. Each `latestStatus` represents the status for a dataSource (which has/had auto-compaction enabled).
+
 The `latestStatus` object has the following keys:
 * `dataSource`: name of the datasource for this status information
 * `scheduleStatus`: auto-compaction scheduling status. Possible values are `NOT_ENABLED` and `RUNNING`. Returns `RUNNING ` if the dataSource has an active auto-compaction config submitted. Otherwise, returns `NOT_ENABLED`.
@@ -457,8 +453,8 @@ The `latestStatus` object has the following keys:
 
 `GET /druid/coordinator/v1/compaction/status?dataSource={dataSource}`
 
-Similar to the API `/druid/coordinator/v1/compaction/status` above but filters response to only return information for the {dataSource} given. 
-Note that {dataSource} given must have/had auto-compaction enabled.
+Similar to the API `/druid/coordinator/v1/compaction/status` above, but filters the response to only return information for the given dataSource.
+The dataSource must have auto-compaction enabled.
 
 #### Automatic compaction configuration
 
@@ -525,14 +521,14 @@ Returns the current leader Overlord of the cluster. If you have multiple Overlor
 
 `GET /druid/indexer/v1/isLeader`
 
-This returns a JSON object with field "leader", either true or false. In addition, this call returns HTTP 200 if the
+This returns a JSON object with field `leader`, either true or false. In addition, this call returns HTTP 200 if the
 server is the current leader and HTTP 404 if not. This is suitable for use as a load balancer status check if you
 only want the active leader to be considered in-service at the load balancer.
 
 #### Tasks
 
 Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
-(e.g., 2016-06-27_2016-06-28).
+as in `2016-06-27_2016-06-28`.
 
 `GET /druid/indexer/v1/tasks`
 
@@ -618,9 +614,9 @@ Returns a list of objects of the currently active supervisors.
 |---|---|---|
 |`id`|String|supervisor unique identifier|
 |`state`|String|basic state of the supervisor. Available states:`UNHEALTHY_SUPERVISOR`, `UNHEALTHY_TASKS`, `PENDING`, `RUNNING`, `SUSPENDED`, `STOPPING`. Check [Kafka Docs](../development/extensions-core/kafka-supervisor-operations.md) for details.|
-|`detailedState`|String|supervisor specific state. (See documentation of specific supervisor for details), e.g. [Kafka](../development/extensions-core/kafka-ingestion.md) or [Kinesis](../development/extensions-core/kinesis-ingestion.md))|
+|`detailedState`|String|supervisor specific state. See documentation of specific supervisor for details: [Kafka](../development/extensions-core/kafka-ingestion.md) or [Kinesis](../development/extensions-core/kinesis-ingestion.md)|
 |`healthy`|Boolean|true or false indicator of overall supervisor health|
-|`spec`|SupervisorSpec|json specification of supervisor (See Supervisor Configuration for details)|
+|`spec`|SupervisorSpec|JSON specification of supervisor|
 
 `GET /druid/indexer/v1/supervisor?state=true`
 
@@ -630,7 +626,7 @@ Returns a list of objects of the currently active supervisors and their current
 |---|---|---|
 |`id`|String|supervisor unique identifier|
 |`state`|String|basic state of the supervisor. Available states: `UNHEALTHY_SUPERVISOR`, `UNHEALTHY_TASKS`, `PENDING`, `RUNNING`, `SUSPENDED`, `STOPPING`. Check [Kafka Docs](../development/extensions-core/kafka-supervisor-operations.md) for details.|
-|`detailedState`|String|supervisor specific state. (See documentation of the specific supervisor for details, e.g. [Kafka](../development/extensions-core/kafka-ingestion.md) or [Kinesis](../development/extensions-core/kinesis-ingestion.md))|
+|`detailedState`|String|supervisor specific state. See documentation of the specific supervisor for details: [Kafka](../development/extensions-core/kafka-ingestion.md) or [Kinesis](../development/extensions-core/kinesis-ingestion.md)|
 |`healthy`|Boolean|true or false indicator of overall supervisor health|
 |`suspended`|Boolean|true or false indicator of whether the supervisor is in suspended state|
 
@@ -685,7 +681,7 @@ Terminate all supervisors at once.
 `POST /druid/indexer/v1/supervisor/<supervisorId>/shutdown`
 
 > This API is deprecated and will be removed in future releases.
-> Please use the equivalent 'terminate' instead.
+> Please use the equivalent `terminate` instead.
 
 Shutdown a supervisor.
 
@@ -694,7 +690,7 @@ Shutdown a supervisor.
 See [Overlord Dynamic Configuration](../configuration/index.md#overlord-dynamic-configuration) for details.
 
 Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
-(e.g., 2016-06-27_2016-06-28).
+as in `2016-06-27_2016-06-28`.
 
 `GET /druid/indexer/v1/worker`
 
@@ -735,7 +731,7 @@ and `druid.port` with the boolean state as the value.
 
 `GET /druid/worker/v1/tasks`
 
-Retrieve a list of active tasks being run on MiddleManager. Returns JSON list of taskid strings.  Normal usage should
+Retrieve a list of active tasks being run on MiddleManager. Returns JSON list of taskid strings. Normal usage should
 prefer to use the `/druid/indexer/v1/tasks` [Overlord API](#overlord) or one of it's task state specific variants instead.
 
 ```json
@@ -810,7 +806,7 @@ This section documents the API endpoints for the processes that reside on Query
 #### Datasource information
 
 Note that all _interval_ URL parameters are ISO 8601 strings delimited by a `_` instead of a `/`
-(e.g., 2016-06-27_2016-06-28).
+as in `2016-06-27_2016-06-28`.
 
 > Note: Much of this information is available in a simpler, easier-to-use form through the Druid SQL
 > [`INFORMATION_SCHEMA.TABLES`](../querying/sql-metadata-tables.md#tables-table),
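As a quick illustration of the process-information and leader endpoints covered by the relocated API reference above, here is a minimal Python sketch of load-balancer-style checks. The Coordinator address and port are assumptions (a local cluster on the default 8081), not something taken from this commit.

```python
# Minimal health/leader polling sketch; host and port are assumptions.
import requests

COORDINATOR = "http://localhost:8081"  # assumed local Coordinator

# /status/health always returns a boolean `true` body with HTTP 200.
health = requests.get(f"{COORDINATOR}/status/health", timeout=5)
print("healthy:", health.status_code == 200 and health.json() is True)

# /druid/coordinator/v1/isLeader returns {"leader": true|false} and answers
# HTTP 200 only on the current leader (404 otherwise), so the status code
# alone is enough for a load-balancer check.
leader = requests.get(f"{COORDINATOR}/druid/coordinator/v1/isLeader", timeout=5)
print("is leader:", leader.status_code == 200)
```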
diff --git a/docs/querying/sql-api.md b/docs/api-reference/sql-api.md
similarity index 90%
rename from docs/querying/sql-api.md
rename to docs/api-reference/sql-api.md
index a425b713a3..54cc3042d0 100644
--- a/docs/querying/sql-api.md
+++ b/docs/api-reference/sql-api.md
@@ -1,7 +1,7 @@
 ---
 id: sql-api
-title: "Druid SQL API"
-sidebar_label: "Druid SQL API"
+title: Druid SQL API
+sidebar_label: Druid SQL
 ---
 
 <!--
@@ -23,10 +23,10 @@ sidebar_label: "Druid SQL API"
   ~ under the License.
   -->
 
-> Apache Druid supports two query languages: Druid SQL and [native queries](querying.md).
+> Apache Druid supports two query languages: Druid SQL and [native queries](../querying/querying.md).
 > This document describes the SQL language.
 
-You can submit and cancel [Druid SQL](./sql.md) queries using the Druid SQL API.
+You can submit and cancel [Druid SQL](../querying/sql.md) queries using the Druid SQL API.
 The Druid SQL API is available at `https://ROUTER:8888/druid/v2/sql`, where `ROUTER` is the IP address of the Druid Router.
 
 ## Submit a query
@@ -50,8 +50,8 @@ Submit your query as the value of a "query" field in the JSON object within the
 |`header`|Whether or not to include a header row for the query result. See [Responses](#responses) for details.|`false`|
 |`typesHeader`|Whether or not to include type information in the header. Can only be set when `header` is also `true`. See [Responses](#responses) for details.|`false`|
 |`sqlTypesHeader`|Whether or not to include SQL type information in the header. Can only be set when `header` is also `true`. See [Responses](#responses) for details.|`false`|
-|`context`|JSON object containing [SQL query context parameters](sql-query-context.md).|`{}` (empty)|
-|`parameters`|List of query parameters for parameterized queries. Each parameter in the list should be a JSON object like `{"type": "VARCHAR", "value": "foo"}`. The type should be a SQL type; see [Data types](sql-data-types.md) for a list of supported SQL types.|`[]` (empty)|
+|`context`|JSON object containing [SQL query context parameters](../querying/sql-query-context.md).|`{}` (empty)|
+|`parameters`|List of query parameters for parameterized queries. Each parameter in the list should be a JSON object like `{"type": "VARCHAR", "value": "foo"}`. The type should be a SQL type; see [Data types](../querying/sql-data-types.md) for a list of supported SQL types.|`[]` (empty)|
 
 You can use _curl_ to send SQL queries from the command-line:
 
@@ -63,7 +63,7 @@ $ curl -XPOST -H'Content-Type: application/json' http://ROUTER:8888/druid/v2/sql
 [{"TheCount":24433}]
 ```
 
-There are a variety of [SQL query context parameters](sql-query-context.md) you can provide by adding a "context" map,
+There are a variety of [SQL query context parameters](../querying/sql-query-context.md) you can provide by adding a "context" map,
 like:
 
 ```json
@@ -87,14 +87,13 @@ Parameterized SQL queries are also supported:
 }
 ```
 
-Metadata is available over HTTP POST by querying [metadata tables](sql-metadata-tables.md).
+Metadata is available over HTTP POST by querying [metadata tables](../querying/sql-metadata-tables.md).
 
 ### Responses
 
 #### Result formats
 
-Druid SQL's HTTP POST API supports a variety of result formats. You can specify these by adding a "resultFormat"
-parameter, like:
+Druid SQL's HTTP POST API supports a variety of result formats. You can specify these by adding a `resultFormat` parameter, like:
 
 ```json
 {
@@ -105,7 +104,7 @@ parameter, like:
 
 To request a header with information about column names, set `header` to true in your request.
 When you set `header` to true, you can optionally include `typesHeader` and `sqlTypesHeader` as well, which gives
-you information about [Druid runtime and SQL types](sql-data-types.md) respectively. You can request all these headers
+you information about [Druid runtime and SQL types](../querying/sql-data-types.md) respectively. You can request all these headers
 with a request like:
 
 ```json
@@ -128,10 +127,10 @@ The following table shows supported result formats:
 |`arrayLines`|Like `array`, but the JSON arrays are separated by newlines instead of being wrapped in a JSON array. This can make it easier to parse the entire response set as a stream, if you do not have ready access to a streaming JSON parser. To make it possible to detect a truncated response, this format includes a trailer of one blank line.|Same as `array`, except the rows are separated by newlines.|text/plain|
 |`csv`|Comma-separated values, with one row per line. Individual field values may be escaped by being surrounded in double quotes. If double quotes appear in a field value, they will be escaped by replacing them with double-double-quotes like `""this""`. To make it possible to detect a truncated response, this format includes a trailer of one blank line.|Same as `array`, except the lists are in CSV format.|text/csv|
 
-If `typesHeader` is set to true, [Druid type](sql-data-types.md) information is included in the response. Complex types,
+If `typesHeader` is set to true, [Druid type](../querying/sql-data-types.md) information is included in the response. Complex types,
 like sketches, will be reported as `COMPLEX<typeName>` if a particular complex type name is known for that field,
 or as `COMPLEX` if the particular type name is unknown or mixed. If `sqlTypesHeader` is set to true,
-[SQL type](sql-data-types.md) information is included in the response. It is possible to set both `typesHeader` and
+[SQL type](../querying/sql-data-types.md) information is included in the response. It is possible to set both `typesHeader` and
 `sqlTypesHeader` at once. Both parameters require that `header` is also set.
 
 To aid in building clients that are compatible with older Druid versions, Druid returns the HTTP header
@@ -140,7 +139,7 @@ understands the `typesHeader` and `sqlTypesHeader` parameters. This HTTP respons
 whether `typesHeader` or `sqlTypesHeader` are set or not.
 
 Druid returns the SQL query identifier in the `X-Druid-SQL-Query-Id` HTTP header.
-This query id will be assigned the value of `sqlQueryId` from the [query context parameters](sql-query-context.md)
+This query id will be assigned the value of `sqlQueryId` from the [query context parameters](../querying/sql-query-context.md)
 if specified, else Druid will generate a SQL query id for you.
 
 #### Errors
@@ -179,7 +178,7 @@ You can cancel the query using the query id `myQuery01` as follows:
 curl --request DELETE 'https://ROUTER:8888/druid/v2/sql/myQuery01' \
 ```
 
-Cancellation requests require READ permission on all resources used in the sql query. 
+Cancellation requests require READ permission on all resources used in the SQL query. 
 
 Druid returns an HTTP 202 response for successful deletion requests.
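To tie together the request-body fields discussed in the relocated Druid SQL API page above (`resultFormat`, the header flags, `context`, and cancellation by `sqlQueryId`), a minimal Python sketch follows; the Router address and the `wikipedia` datasource are assumptions.

```python
# Sketch of submitting and cancelling a Druid SQL query over HTTP.
# The Router address and the datasource name are assumptions.
import requests

ROUTER = "http://localhost:8888"  # assumed Router address

payload = {
    "query": "SELECT COUNT(*) AS TheCount FROM wikipedia",  # assumed datasource
    "resultFormat": "array",
    "header": True,
    "typesHeader": True,
    "sqlTypesHeader": True,
    "context": {"sqlQueryId": "myQuery01"},  # explicit id so the query can be cancelled
}

resp = requests.post(f"{ROUTER}/druid/v2/sql", json=payload, timeout=60)
print(resp.headers.get("X-Druid-SQL-Query-Id"))  # echoes the sqlQueryId set above
print(resp.json())

# Cancellation reuses the same id; a successful delete returns HTTP 202.
cancel = requests.delete(f"{ROUTER}/druid/v2/sql/myQuery01", timeout=60)
print(cancel.status_code)
```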
 
diff --git a/docs/multi-stage-query/api.md b/docs/api-reference/sql-ingestion-api.md
similarity index 97%
rename from docs/multi-stage-query/api.md
rename to docs/api-reference/sql-ingestion-api.md
index 19e1e11c4c..a9cceb8d4d 100644
--- a/docs/multi-stage-query/api.md
+++ b/docs/api-reference/sql-ingestion-api.md
@@ -1,7 +1,7 @@
 ---
-id: api
-title: SQL-based ingestion and multi-stage query task API
-sidebar_label: API
+id: sql-ingestion-api
+title: SQL-based ingestion API
+sidebar_label: SQL-based ingestion
 ---
 
 <!--
@@ -34,7 +34,7 @@ interface.
 When using the API for the MSQ task engine, the action you want to take determines the endpoint you use:
 
 - `/druid/v2/sql/task` endpoint: Submit a query for ingestion.
-- `/druid/indexer/v1/task` endpoint: Interact with a query, including getting its status, getting its details, or canceling it. This page describes a few of the Overlord Task APIs that you can use with the MSQ task engine. For information about Druid APIs, see the [API reference for Druid](../operations/api-reference.md#tasks).
+- `/druid/indexer/v1/task` endpoint: Interact with a query, including getting its status, getting its details, or canceling it. This page describes a few of the Overlord Task APIs that you can use with the MSQ task engine. For information about Druid APIs, see the [API reference for Druid](../ingestion/tasks.md).
 
 ## Submit a query
 
@@ -42,11 +42,11 @@ You submit queries to the MSQ task engine using the `POST /druid/v2/sql/task/` e
 
 #### Request
 
-The SQL task endpoint accepts [SQL requests in the JSON-over-HTTP form](../querying/sql-api.md#request-body) using the
+The SQL task endpoint accepts [SQL requests in the JSON-over-HTTP form](sql-api.md#request-body) using the
 `query`, `context`, and `parameters` fields, but ignoring the `resultFormat`, `header`, `typesHeader`, and
 `sqlTypesHeader` fields.
 
-This endpoint accepts [INSERT](reference.md#insert) and [REPLACE](reference.md#replace) statements.
+This endpoint accepts [INSERT](../multi-stage-query/reference.md#insert) and [REPLACE](../multi-stage-query/reference.md#replace) statements.
 
 As an experimental feature, this endpoint also accepts SELECT queries. SELECT query results are collected from workers
 by the controller, and written into the [task report](#get-the-report-for-a-query-task) as an array of arrays. The
@@ -123,7 +123,7 @@ print(response.text)
 
 | Field | Description |
 |---|---|
-| `taskId` | Controller task ID. You can use Druid's standard [task APIs](../operations/api-reference.md#overlord) to interact with this controller task. |
+| `taskId` | Controller task ID. You can use Druid's standard [task APIs](api-reference.md#overlord) to interact with this controller task. |
 | `state` | Initial state for the query, which is "RUNNING". |
 
 ## Get the status for a query task
@@ -564,8 +564,8 @@ The following table describes the response fields when you retrieve a report for
 | `multiStageQuery.payload.status.errorReport.taskId` | The task that reported the error, if known. May be a controller task or a worker task. |
 | `multiStageQuery.payload.status.errorReport.host` | The hostname and port of the task that reported the error, if known. |
 | `multiStageQuery.payload.status.errorReport.stageNumber` | The stage number that reported the error, if it happened during execution of a specific stage. |
-| `multiStageQuery.payload.status.errorReport.error` | Error object. Contains `errorCode` at a minimum, and may contain other fields as described in the [error code table](./reference.md#error-codes). Always present if there is an error. |
-| `multiStageQuery.payload.status.errorReport.error.errorCode` | One of the error codes from the [error code table](./reference.md#error-codes). Always present if there is an error. |
+| `multiStageQuery.payload.status.errorReport.error` | Error object. Contains `errorCode` at a minimum, and may contain other fields as described in the [error code table](../multi-stage-query/reference.md#error-codes). Always present if there is an error. |
+| `multiStageQuery.payload.status.errorReport.error.errorCode` | One of the error codes from the [error code table](../multi-stage-query/reference.md#error-codes). Always present if there is an error. |
 | `multiStageQuery.payload.status.errorReport.error.errorMessage` | User-friendly error message. Not always present, even if there is an error. |
 | `multiStageQuery.payload.status.errorReport.exceptionStackTrace` | Java stack trace in string form, if the error was due to a server-side exception. |
 | `multiStageQuery.payload.stages` | Array of query stages. |
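As a companion to the renamed SQL-based ingestion API page above, a minimal Python sketch of submitting an INSERT statement to `/druid/v2/sql/task` and then polling the controller task through the standard Overlord task-status API. The Router address and the table names are assumptions.

```python
# Sketch of an MSQ task-engine submission; address and table names are assumptions.
import time

import requests

ROUTER = "http://localhost:8888"  # assumed Router address; it proxies Overlord APIs

payload = {
    # Hypothetical source/target tables; MSQ INSERT statements expect a PARTITIONED BY clause.
    "query": 'INSERT INTO "target_table" SELECT * FROM "source_table" PARTITIONED BY ALL TIME',
}

resp = requests.post(f"{ROUTER}/druid/v2/sql/task", json=payload, timeout=60)
submission = resp.json()
task_id = submission["taskId"]       # controller task id
print(task_id, submission["state"])  # initial state is "RUNNING"

# Poll the controller task through the standard Overlord task-status API.
while True:
    status = requests.get(
        f"{ROUTER}/druid/indexer/v1/task/{task_id}/status", timeout=60
    ).json()["status"]["statusCode"]
    if status in ("SUCCESS", "FAILED"):
        break
    time.sleep(10)
print("final status:", status)
```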
diff --git a/docs/querying/sql-jdbc.md b/docs/api-reference/sql-jdbc.md
similarity index 89%
rename from docs/querying/sql-jdbc.md
rename to docs/api-reference/sql-jdbc.md
index a558637bc1..82b2c2012b 100644
--- a/docs/querying/sql-jdbc.md
+++ b/docs/api-reference/sql-jdbc.md
@@ -1,7 +1,7 @@
 ---
 id: sql-jdbc
-title: "SQL JDBC driver API"
-sidebar_label: "JDBC driver API"
+title: SQL JDBC driver API
+sidebar_label: SQL JDBC driver
 ---
 
 <!--
@@ -23,11 +23,11 @@ sidebar_label: "JDBC driver API"
   ~ under the License.
   -->
 
-> Apache Druid supports two query languages: Druid SQL and [native queries](querying.md).
+> Apache Druid supports two query languages: Druid SQL and [native queries](../querying/querying.md).
 > This document describes the SQL language.
 
 
-You can make [Druid SQL](./sql.md) queries using the [Avatica JDBC driver](https://calcite.apache.org/avatica/downloads/). We recommend using Avatica JDBC driver version 1.17.0 or later. Note that as of the time of this writing, Avatica 1.17.0, the latest version, does not support passing connection string parameters from the URL to Druid, so you must pass them using a `Properties` object. Once you've downloaded the Avatica client jar, add it to your classpath and use the connect string  [...]
+You can make [Druid SQL](../querying/sql.md) queries using the [Avatica JDBC driver](https://calcite.apache.org/avatica/downloads/). We recommend using Avatica JDBC driver version 1.17.0 or later. Note that as of the time of this writing, Avatica 1.17.0, the latest version, does not support passing connection string parameters from the URL to Druid, so you must pass them using a `Properties` object. Once you've downloaded the Avatica client jar, add it to your classpath and use the conne [...]
 
 When using the JDBC connector for the [examples](#examples) or in general, it's helpful to understand the parts of the connect string stored in the `url` variable:
 
@@ -60,7 +60,7 @@ try (Connection connection = DriverManager.getConnection(url, connectionProperti
 For a runnable example that includes a query that you might run, see [Examples](#examples).
 
 It is also possible to use a protocol buffers JDBC connection with Druid, this offer reduced bloat and potential performance
-improvements for larger result sets. To use it apply the following connection url instead, everything else remains the same
+improvements for larger result sets. To use it, apply the following connection URL instead; everything else remains the same:
 ```
 String url = "jdbc:avatica:remote:url=http://localhost:8082/druid/v2/sql/avatica-protobuf/;serialization=protobuf";
 ```
@@ -68,7 +68,7 @@ String url = "jdbc:avatica:remote:url=http://localhost:8082/druid/v2/sql/avatica
 > The protobuf endpoint is also known to work with the official [Golang Avatica driver](https://github.com/apache/calcite-avatica-go)
 
 Table metadata is available over JDBC using `connection.getMetaData()` or by querying the
-["INFORMATION_SCHEMA" tables](sql-metadata-tables.md). For an example of this, see [Get the metadata for a datasource](#get-the-metadata-for-a-datasource).
+[INFORMATION_SCHEMA tables](../querying/sql-metadata-tables.md). For an example of this, see [Get the metadata for a datasource](#get-the-metadata-for-a-datasource).
 
 ## Connection stickiness
 
@@ -82,7 +82,7 @@ Note that the non-JDBC [JSON over HTTP](sql-api.md#submit-a-query) API is statel
 
 ## Dynamic parameters
 
-You can use [parameterized queries](sql.md#dynamic-parameters) in JDBC code, as in this example:
+You can use [parameterized queries](../querying/sql.md#dynamic-parameters) in JDBC code, as in this example:
 
 ```java
 PreparedStatement statement = connection.prepareStatement("SELECT COUNT(*) AS cnt FROM druid.foo WHERE dim1 = ? OR dim1 = ?");
diff --git a/docs/development/extensions.md b/docs/configuration/extensions.md
similarity index 93%
rename from docs/development/extensions.md
rename to docs/configuration/extensions.md
index 36d3549b19..3a2844221c 100644
--- a/docs/development/extensions.md
+++ b/docs/configuration/extensions.md
@@ -96,7 +96,7 @@ All of these community extensions can be downloaded using [pull-deps](../operati
 |druid-momentsketch|Support for approximate quantile queries using the [momentsketch](https://github.com/stanford-futuredata/momentsketch) library|[link](../development/extensions-contrib/momentsketch-quantiles.md)|
 |druid-tdigestsketch|Support for approximate sketch aggregators based on [T-Digest](https://github.com/tdunning/t-digest)|[link](../development/extensions-contrib/tdigestsketch-quantiles.md)|
 |gce-extensions|GCE Extensions|[link](../development/extensions-contrib/gce-extensions.md)|
-|prometheus-emitter|Exposes [Druid metrics](../operations/metrics.md) for Prometheus server collection (https://prometheus.io/)|[link](./extensions-contrib/prometheus.md)|
+|prometheus-emitter|Exposes [Druid metrics](../operations/metrics.md) for Prometheus server collection (https://prometheus.io/)|[link](../development/extensions-contrib/prometheus.md)|
 |kubernetes-overlord-extensions|Support for launching tasks in k8s without Middle Managers|[link](../development/extensions-contrib/k8s-jobs.md)|
 
 ## Promoting community extensions to core extensions
@@ -111,11 +111,11 @@ For information how to create your own extension, please see [here](../developme
 
 ### Loading core extensions
 
-Apache Druid bundles all [core extensions](../development/extensions.md#core-extensions) out of the box.
-See the [list of extensions](../development/extensions.md#core-extensions) for your options. You
+Apache Druid bundles all [core extensions](../configuration/extensions.md#core-extensions) out of the box.
+See the [list of extensions](../configuration/extensions.md#core-extensions) for your options. You
 can load bundled extensions by adding their names to your common.runtime.properties
-`druid.extensions.loadList` property. For example, to load the *postgresql-metadata-storage* and
-*druid-hdfs-storage* extensions, use the configuration:
+`druid.extensions.loadList` property. For example, to load the postgresql-metadata-storage and
+druid-hdfs-storage extensions, use the configuration:
 
 ```
 druid.extensions.loadList=["postgresql-metadata-storage", "druid-hdfs-storage"]
@@ -125,7 +125,7 @@ These extensions are located in the `extensions` directory of the distribution.
 
 > Druid bundles two sets of configurations: one for the [quickstart](../tutorials/index.md) and
 > one for a [clustered configuration](../tutorials/cluster.md). Make sure you are updating the correct
-> common.runtime.properties for your setup.
+> `common.runtime.properties` for your setup.
 
 > Because of licensing, the mysql-metadata-storage extension does not include the required MySQL JDBC driver. For instructions
 > on how to install this library, see the [MySQL extension page](../development/extensions-core/mysql.md).
@@ -153,11 +153,11 @@ You only have to install the extension once. Then, add `"druid-example-extension
 
 > Please make sure all the Extensions related configuration properties listed [here](../configuration/index.md#extensions) are set correctly.
 
-> The Maven groupId for almost every [community extension](../development/extensions.md#community-extensions) is org.apache.druid.extensions.contrib. The artifactId is the name
+> The Maven `groupId` for almost every [community extension](../configuration/extensions.md#community-extensions) is `org.apache.druid.extensions.contrib`. The `artifactId` is the name
 > of the extension, and the version is the latest Druid stable version.
 
 ### Loading extensions from the classpath
 
-If you add your extension jar to the classpath at runtime, Druid will also load it into the system.  This mechanism is relatively easy to reason about,
-but it also means that you have to ensure that all dependency jars on the classpath are compatible.  That is, Druid makes no provisions while using
+If you add your extension jar to the classpath at runtime, Druid will also load it into the system. This mechanism is relatively easy to reason about,
+but it also means that you have to ensure that all dependency jars on the classpath are compatible. That is, Druid makes no provisions while using
 this method to maintain class loader isolation so you must make sure that the jars on your classpath are mutually compatible.
diff --git a/docs/configuration/index.md b/docs/configuration/index.md
index 42542c35ea..074aa47b95 100644
--- a/docs/configuration/index.md
+++ b/docs/configuration/index.md
@@ -245,7 +245,7 @@ values for the above mentioned configs among others provided by Java implementat
 |`druid.auth.unsecuredPaths`| List of Strings|List of paths for which security checks will not be performed. All requests to these paths will be allowed.|[]|no|
 |`druid.auth.allowUnauthenticatedHttpOptions`|Boolean|If true, skip authentication checks for HTTP OPTIONS requests. This is needed for certain use cases, such as supporting CORS pre-flight requests. Note that disabling authentication checks for OPTIONS requests will allow unauthenticated users to determine what Druid endpoints are valid (by checking if the OPTIONS request returns a 200 instead of 404), so enabling this option may reveal information about server configuration, including  [...]
 
-For more information, please see [Authentication and Authorization](../design/auth.md).
+For more information, please see [Authentication and Authorization](../operations/auth.md).
 
 For configuration options for specific auth extensions, please refer to the extension documentation.
 
@@ -581,7 +581,7 @@ This deep storage is used to interface with Cassandra.  Note that the `druid-cas
 #### HDFS input source
 
 You can set the following property to specify permissible protocols for
-the [HDFS input source](../ingestion/native-batch-input-source.md#hdfs-input-source).
+the [HDFS input source](../ingestion/input-sources.md#hdfs-input-source).
 
 |Property|Possible Values|Description|Default|
 |--------|---------------|-----------|-------|
@@ -591,7 +591,7 @@ the [HDFS input source](../ingestion/native-batch-input-source.md#hdfs-input-sou
 #### HTTP input source
 
 You can set the following property to specify permissible protocols for
-the [HTTP input source](../ingestion/native-batch-input-source.md#http-input-source).
+the [HTTP input source](../ingestion/input-sources.md#http-input-source).
 
 |Property|Possible Values|Description|Default|
 |--------|---------------|-----------|-------|
@@ -603,7 +603,7 @@ the [HTTP input source](../ingestion/native-batch-input-source.md#http-input-sou
 #### JDBC Connections to External Databases
 
 You can use the following properties to specify permissible JDBC options for:
-- [SQL input source](../ingestion/native-batch-input-source.md#sql-input-source)
+- [SQL input source](../ingestion/input-sources.md#sql-input-source)
 - [globally cached JDBC lookups](../development/extensions-core/lookups-cached-global.md#jdbc-lookup)
 - [JDBC Data Fetcher for per-lookup caching](../development/extensions-core/druid-lookups.md#data-fetcher-layer).
 
@@ -998,7 +998,7 @@ These configuration options control Coordinator lookup management. See [dynamic
 ##### Automatic compaction dynamic configuration
 
 You can set or update [automatic compaction](../data-management/automatic-compaction.md) properties dynamically using the
-[Coordinator API](../operations/api-reference.md#automatic-compaction-configuration) without restarting Coordinators.
+[Coordinator API](../api-reference/api-reference.md#automatic-compaction-configuration) without restarting Coordinators.
 
 For details about segment compaction, see [Segment size optimization](../operations/segment-optimization.md).
 
@@ -1525,7 +1525,7 @@ Additional peon configs include:
 |`druid.indexer.task.gracefulShutdownTimeout`|Wait this long on middleManager restart for restorable tasks to gracefully exit.|PT5M|
 |`druid.indexer.task.hadoopWorkingPath`|Temporary working directory for Hadoop tasks.|`/tmp/druid-indexing`|
 |`druid.indexer.task.restoreTasksOnRestart`|If true, MiddleManagers will attempt to stop tasks gracefully on shutdown and restore them on restart.|false|
-|`druid.indexer.task.ignoreTimestampSpecForDruidInputSource`|If true, tasks using the [Druid input source](../ingestion/native-batch-input-source.md) will ignore the provided timestampSpec, and will use the `__time` column of the input datasource. This option is provided for compatibility with ingestion specs written before Druid 0.22.0.|false|
+|`druid.indexer.task.ignoreTimestampSpecForDruidInputSource`|If true, tasks using the [Druid input source](../ingestion/input-sources.md) will ignore the provided timestampSpec, and will use the `__time` column of the input datasource. This option is provided for compatibility with ingestion specs written before Druid 0.22.0.|false|
 |`druid.indexer.task.storeEmptyColumns`|Boolean value for whether or not to store empty columns during ingestion. When set to true, Druid stores every column specified in the [`dimensionsSpec`](../ingestion/ingestion-spec.md#dimensionsspec). If you use the string-based schemaless ingestion and don't specify any dimensions to ingest, you must also set [`includeAllDimensions`](../ingestion/ingestion-spec.md#dimensionsspec) for Druid to store empty columns.<br/><br/>If you set `storeEmptyCo [...]
 |`druid.indexer.task.tmpStorageBytesPerTask`|Maximum number of bytes per task to be used to store temporary files on disk. This config is generally intended for internal usage.  Attempts to set it are very likely to be overwritten by the TaskRunner that executes the task, so be sure of what you expect to happen before directly adjusting this configuration parameter.  The config is documented here primarily to provide an understanding of what it means if/when someone sees that it has been [...]
 |`druid.indexer.server.maxChatRequests`|Maximum number of concurrent requests served by a task's chat handler. Set to 0 to disable limiting.|0|
@@ -1594,9 +1594,8 @@ then the value from the configuration below is used:
 |`druid.indexer.task.gracefulShutdownTimeout`|Wait this long on Indexer restart for restorable tasks to gracefully exit.|PT5M|
 |`druid.indexer.task.hadoopWorkingPath`|Temporary working directory for Hadoop tasks.|`/tmp/druid-indexing`|
 |`druid.indexer.task.restoreTasksOnRestart`|If true, the Indexer will attempt to stop tasks gracefully on shutdown and restore them on restart.|false|
-|`druid.indexer.task.ignoreTimestampSpecForDruidInputSource`|If true, tasks using the [Druid input source](../ingestion/native-batch-input-source.md) will ignore the provided timestampSpec, and will use the `__time` column of the input datasource. This option is provided for compatibility with ingestion specs written before Druid 0.22.0.|false|
-|`druid.indexer.task.storeEmptyColumns`|Boolean value for whether or not to store empty columns during ingestion. When set to true, Druid stores every column specified in the [`dimensionsSpec`](../ingestion/ingestion-spec.md#dimensionsspec). <br/><br/>If you set `storeEmptyColumns` to false, Druid SQL queries referencing empty columns will fail. If you intend to leave `storeEmptyColumns` disabled, you should either ingest placeholder data for empty columns or else not query on empty colu [...]
-|`druid.peon.taskActionClient.retry.minWait`|The minimum retry time to communicate with Overlord.|PT5S|
+|`druid.indexer.task.ignoreTimestampSpecForDruidInputSource`|If true, tasks using the [Druid input source](../ingestion/input-sources.md) will ignore the provided timestampSpec, and will use the `__time` column of the input datasource. This option is provided for compatibility with ingestion specs written before Druid 0.22.0.|false|
+|`druid.indexer.task.storeEmptyColumns`|Boolean value for whether or not to store empty columns during ingestion. When set to true, Druid stores every column specified in the [`dimensionsSpec`](../ingestion/ingestion-spec.md#dimensionsspec). <br/><br/>If you set `storeEmptyColumns` to false, Druid SQL queries referencing empty columns will fail. If you intend to leave `storeEmptyColumns` disabled, you should either ingest placeholder data for empty columns or else not query on empty colu [...]
 |`druid.peon.taskActionClient.retry.maxWait`|The maximum retry time to communicate with Overlord.|PT1M|
 |`druid.peon.taskActionClient.retry.maxRetryCount`|The maximum number of retries to communicate with Overlord.|60|
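
As a sketch, the Overlord communication retry behavior described above could be written out explicitly in runtime properties; the values shown simply restate the defaults from the table:

```
druid.peon.taskActionClient.retry.maxWait=PT1M
druid.peon.taskActionClient.retry.maxRetryCount=60
```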
 
@@ -2245,7 +2244,7 @@ Supported query contexts:
 
 |Key|Description|Default|
 |---|-----------|-------|
-|`druid.expressions.useStrictBooleans`|Controls the behavior of Druid boolean operators and functions, if set to `true` all boolean values will be either a `1` or `0`. See [expression documentation](../misc/math-expr.md#logical-operator-modes)|false|
+|`druid.expressions.useStrictBooleans`|Controls the behavior of Druid boolean operators and functions. When set to `true`, all boolean values are either `1` or `0`. See the [expression documentation](../querying/math-expr.md#logical-operator-modes) for details.|false|
 |`druid.expressions.allowNestedArrays`|If enabled, Druid array expressions can create nested arrays.|false|
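
As an illustrative sketch, both expression properties can be enabled together in `common.runtime.properties`; the values shown are assumptions for the example, not the defaults:

```
druid.expressions.useStrictBooleans=true
druid.expressions.allowNestedArrays=true
```
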
 ### Router
 
diff --git a/docs/data-management/automatic-compaction.md b/docs/data-management/automatic-compaction.md
index 5f5a76eebe..866ca2407d 100644
--- a/docs/data-management/automatic-compaction.md
+++ b/docs/data-management/automatic-compaction.md
@@ -40,7 +40,7 @@ This topic guides you through setting up automatic compaction for your Druid clu
 ## Enable automatic compaction
 
 You can enable automatic compaction for a datasource using the web console or programmatically via an API.
-This process differs for manual compaction tasks, which can be submitted from the [Tasks view of the web console](../operations/web-console.md) or the [Tasks API](../operations/api-reference.md#tasks).
+This process differs for manual compaction tasks, which can be submitted from the [Tasks view of the web console](../operations/web-console.md) or the [Tasks API](../api-reference/api-reference.md#tasks).
 
 ### web console
 
@@ -59,10 +59,10 @@ To disable auto-compaction for a datasource, click **Delete** from the **Compact
 
 ### Compaction configuration API
 
-Use the [Coordinator API](../operations/api-reference.md#automatic-compaction-status) to configure automatic compaction.
+Use the [Coordinator API](../api-reference/api-reference.md#automatic-compaction-status) to configure automatic compaction.
 To enable auto-compaction for a datasource, create a JSON object with the desired auto-compaction settings.
 See [Configure automatic compaction](#configure-automatic-compaction) for the syntax of an auto-compaction spec.
-Send the JSON object as a payload in a [`POST` request](../operations/api-reference.md#automatic-compaction-configuration) to `/druid/coordinator/v1/config/compaction`.
+Send the JSON object as a payload in a [`POST` request](../api-reference/api-reference.md#automatic-compaction-configuration) to `/druid/coordinator/v1/config/compaction`.
 The following example configures auto-compaction for the `wikipedia` datasource:
 
 ```sh
@@ -76,7 +76,7 @@ curl --location --request POST 'http://localhost:8081/druid/coordinator/v1/confi
 }'
 ```
 
-To disable auto-compaction for a datasource, send a [`DELETE` request](../operations/api-reference.md#automatic-compaction-configuration) to `/druid/coordinator/v1/config/compaction/{dataSource}`. Replace `{dataSource}` with the name of the datasource for which to disable auto-compaction. For example:
+To disable auto-compaction for a datasource, send a [`DELETE` request](../api-reference/api-reference.md#automatic-compaction-configuration) to `/druid/coordinator/v1/config/compaction/{dataSource}`. Replace `{dataSource}` with the name of the datasource for which to disable auto-compaction. For example:
 
 ```sh
 curl --location --request DELETE 'http://localhost:8081/druid/coordinator/v1/config/compaction/wikipedia'
@@ -152,7 +152,7 @@ After the Coordinator has initiated auto-compaction, you can view compaction sta
 
 In the web console, the Datasources view displays auto-compaction statistics. The Tasks view shows the task information for compaction tasks that were triggered by the automatic compaction system.
 
-To get statistics by API, send a [`GET` request](../operations/api-reference.md#automatic-compaction-status) to `/druid/coordinator/v1/compaction/status`. To filter the results to a particular datasource, pass the datasource name as a query parameter to the requestβ€”for example, `/druid/coordinator/v1/compaction/status?dataSource=wikipedia`.
+To get statistics by API, send a [`GET` request](../api-reference/api-reference.md#automatic-compaction-status) to `/druid/coordinator/v1/compaction/status`. To filter the results to a particular datasource, pass the datasource name as a query parameter to the requestβ€”for example, `/druid/coordinator/v1/compaction/status?dataSource=wikipedia`.
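
For example, a status request for the `wikipedia` datasource, assuming the same local Coordinator used in the earlier examples, might look like this:

```sh
curl --location --request GET 'http://localhost:8081/druid/coordinator/v1/compaction/status?dataSource=wikipedia'
```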
 
 ## Examples
 
diff --git a/docs/data-management/compaction.md b/docs/data-management/compaction.md
index a4264c160e..3e833469e8 100644
--- a/docs/data-management/compaction.md
+++ b/docs/data-management/compaction.md
@@ -136,7 +136,7 @@ To control the number of result segments per time chunk, you can set [`maxRowsPe
 
 > You can run multiple compaction tasks in parallel. For example, if you want to compact the data for a year, you are not limited to running a single task for the entire year. You can run 12 compaction tasks with month-long intervals.
 
-A compaction task internally generates an `index` or `index_parallel` task spec for performing compaction work with some fixed parameters. For example, its `inputSource` is always the [`druid` input source](../ingestion/native-batch-input-source.md), and `dimensionsSpec` and `metricsSpec` include all dimensions and metrics of the input segments by default.
+A compaction task internally generates an `index` or `index_parallel` task spec for performing compaction work with some fixed parameters. For example, its `inputSource` is always the [`druid` input source](../ingestion/input-sources.md), and `dimensionsSpec` and `metricsSpec` include all dimensions and metrics of the input segments by default.
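
For reference, a minimal manual compaction task spec that relies on these fixed parameters might look like the following sketch; the datasource name and interval are assumptions for the example:

```json
{
  "type": "compact",
  "dataSource": "wikipedia",
  "ioConfig": {
    "type": "compact",
    "inputSpec": {
      "type": "interval",
      "interval": "2020-01-01/2020-02-01"
    }
  }
}
```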
 
 Compaction tasks fetch all [relevant segments](#compaction-io-configuration) prior to launching any subtasks, _unless_ the following items are all set. It is strongly recommended to set all of these items to maximize performance and minimize disk usage of the `compact` task:
 
diff --git a/docs/data-management/delete.md b/docs/data-management/delete.md
index 361c7873cc..ebabd69c4d 100644
--- a/docs/data-management/delete.md
+++ b/docs/data-management/delete.md
@@ -38,7 +38,7 @@ Deletion by time range happens in two steps:
    you have a backup.
 
 For documentation on disabling segments using the Coordinator API, see the
-[Coordinator API reference](../operations/api-reference.md#coordinator-datasources).
+[Coordinator API reference](../api-reference/api-reference.md#coordinator-datasources).
 
 A data deletion tutorial is available at [Tutorial: Deleting data](../tutorials/tutorial-delete-data.md).
 
@@ -65,7 +65,7 @@ For example, to delete records where `userName` is `'bob'` with native batch ind
 To delete the same records using SQL, use [REPLACE](../multi-stage-query/concepts.md#replace) with `WHERE userName <> 'bob'`.
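
As a sketch of that SQL approach, assuming a day-partitioned datasource named `wikipedia`:

```sql
REPLACE INTO "wikipedia" OVERWRITE ALL
SELECT *
FROM "wikipedia"
WHERE "userName" <> 'bob'
PARTITIONED BY DAY
```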
 
 To reindex using [native batch](../ingestion/native-batch.md), use the [`druid` input
-source](../ingestion/native-batch-input-source.md#druid-input-source). If needed,
+source](../ingestion/input-sources.md#druid-input-source). If needed,
 [`transformSpec`](../ingestion/ingestion-spec.md#transformspec) can be used to filter or modify data during the
 reindexing job. To reindex with SQL, use [`REPLACE <table> OVERWRITE`](../multi-stage-query/reference.md#replace)
 with `SELECT ... FROM <table>`. (Druid does not have `UPDATE` or `ALTER TABLE` statements.) Any SQL SELECT query can be
diff --git a/docs/data-management/update.md b/docs/data-management/update.md
index 070aaf3489..74508d0acf 100644
--- a/docs/data-management/update.md
+++ b/docs/data-management/update.md
@@ -52,7 +52,7 @@ is used to perform schema changes, repartition data, filter out unwanted data, e
 behaves just like any other [overwrite](#overwrite) with regard to atomic updates and locking.
 
 With [native batch](../ingestion/native-batch.md), use the [`druid` input
-source](../ingestion/native-batch-input-source.md#druid-input-source). If needed,
+source](../ingestion/input-sources.md#druid-input-source). If needed,
 [`transformSpec`](../ingestion/ingestion-spec.md#transformspec) can be used to filter or modify data during the
 reindexing job.
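
As an illustrative fragment (not a complete ingestion spec), the relevant pieces might look like this; the datasource name, interval, and transform filter are assumptions for the example:

```json
"ioConfig": {
  "type": "index_parallel",
  "inputSource": {
    "type": "druid",
    "dataSource": "wikipedia",
    "interval": "2016-06-27/2016-06-28"
  }
},
"dataSchema": {
  "transformSpec": {
    "filter": { "type": "selector", "dimension": "channel", "value": "#en.wikipedia" }
  }
}
```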
 
diff --git a/docs/design/architecture.md b/docs/design/architecture.md
index 21f69663d2..0362ca3c1d 100644
--- a/docs/design/architecture.md
+++ b/docs/design/architecture.md
@@ -80,7 +80,7 @@ both in deep storage and across your Historical servers for the data you plan to
 Deep storage is an important part of Druid's elastic, fault-tolerant design. Druid bootstraps from deep storage even
 if every single data server is lost and re-provisioned.
 
-For more details, please see the [Deep storage](../dependencies/deep-storage.md) page.
+For more details, please see the [Deep storage](../design/deep-storage.md) page.
 
 ### Metadata storage
 
@@ -88,13 +88,13 @@ The metadata storage holds various shared system metadata such as segment usage
 clustered deployment, this is typically a traditional RDBMS like PostgreSQL or MySQL. In a single-server
 deployment, it is typically a locally-stored Apache Derby database.
 
-For more details, please see the [Metadata storage](../dependencies/metadata-storage.md) page.
+For more details, please see the [Metadata storage](../design/metadata-storage.md) page.
 
 ### ZooKeeper
 
 Used for internal service discovery, coordination, and leader election.
 
-For more details, please see the [ZooKeeper](../dependencies/zookeeper.md) page.
+For more details, please see the [ZooKeeper](zookeeper.md) page.
 
 
 ## Storage design
@@ -203,7 +203,7 @@ new segments. Then it drops the old segments a few minutes later.
 Each segment has a lifecycle that involves the following three major areas:
 
 1. **Metadata store:** Segment metadata (a small JSON payload generally no more than a few KB) is stored in the
-[metadata store](../dependencies/metadata-storage.md) once a segment is done being constructed. The act of inserting
+[metadata store](../design/metadata-storage.md) once a segment is done being constructed. The act of inserting
 a record for a segment into the metadata store is called _publishing_. These metadata records have a boolean flag
 named `used`, which controls whether the segment is intended to be queryable or not. Segments created by realtime tasks will be
 available before they are published, since they are only published when the segment is complete and will not accept
diff --git a/docs/design/broker.md b/docs/design/broker.md
index 795f70faca..1c8c3be7b6 100644
--- a/docs/design/broker.md
+++ b/docs/design/broker.md
@@ -31,7 +31,7 @@ For basic tuning guidance for the Broker process, see [Basic cluster tuning](../
 
 ### HTTP endpoints
 
-For a list of API endpoints supported by the Broker, see [Broker API](../operations/api-reference.md#broker).
+For a list of API endpoints supported by the Broker, see [Broker API](../api-reference/api-reference.md#broker).
 
 ### Overview
 
diff --git a/docs/design/coordinator.md b/docs/design/coordinator.md
index 52f5f159e4..f0a162fe66 100644
--- a/docs/design/coordinator.md
+++ b/docs/design/coordinator.md
@@ -31,7 +31,7 @@ For basic tuning guidance for the Coordinator process, see [Basic cluster tuning
 
 ### HTTP endpoints
 
-For a list of API endpoints supported by the Coordinator, see [Coordinator API](../operations/api-reference.md#coordinator).
+For a list of API endpoints supported by the Coordinator, see [Coordinator API](../api-reference/api-reference.md#coordinator).
 
 ### Overview
 
@@ -92,7 +92,7 @@ Once some segments are found, it issues a [compaction task](../ingestion/tasks.m
 The maximum number of running compaction tasks is `min(sum of worker capacity * slotRatio, maxSlots)`.
 Note that even if `min(sum of worker capacity * slotRatio, maxSlots) = 0`, at least one compaction task is always submitted
 if compaction is enabled for a dataSource.
-See [Automatic compaction configuration API](../operations/api-reference.md#automatic-compaction-configuration) and [Automatic compaction configuration](../configuration/index.md#automatic-compaction-dynamic-configuration) to enable and configure automatic compaction.
+See [Automatic compaction configuration API](../api-reference/api-reference.md#automatic-compaction-configuration) and [Automatic compaction configuration](../configuration/index.md#automatic-compaction-dynamic-configuration) to enable and configure automatic compaction.
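
As a worked example with assumed numbers (50 total worker slots, a slot ratio of 0.1, and a maximum of 20 slots):

```
min(sum of worker capacity * slotRatio, maxSlots)
  = min(50 * 0.1, 20)
  = 5 compaction task slots
```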
 
 Compaction tasks might fail due to the following reasons:
 
diff --git a/docs/dependencies/deep-storage.md b/docs/design/deep-storage.md
similarity index 98%
rename from docs/dependencies/deep-storage.md
rename to docs/design/deep-storage.md
index b63f968bf5..f5adf35c6a 100644
--- a/docs/dependencies/deep-storage.md
+++ b/docs/design/deep-storage.md
@@ -73,4 +73,4 @@ See [druid-hdfs-storage extension documentation](../development/extensions-core/
 
 ## Additional options
 
-For additional deep storage options, please see our [extensions list](../development/extensions.md).
+For additional deep storage options, please see our [extensions list](../configuration/extensions.md).
diff --git a/docs/design/extensions-contrib/dropwizard.md b/docs/design/extensions-contrib/dropwizard.md
index a2a8c34d6e..fa1967cf05 100644
--- a/docs/design/extensions-contrib/dropwizard.md
+++ b/docs/design/extensions-contrib/dropwizard.md
@@ -24,7 +24,7 @@ title: "Dropwizard metrics emitter"
 
 # Dropwizard Emitter
 
-To use this extension, make sure to [include](../../development/extensions.md#loading-extensions) `dropwizard-emitter` in the extensions load list.
+To use this extension, make sure to [include](../../configuration/extensions.md#loading-extensions) `dropwizard-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/design/historical.md b/docs/design/historical.md
index f3580f5dac..a2fb3032de 100644
--- a/docs/design/historical.md
+++ b/docs/design/historical.md
@@ -31,7 +31,7 @@ For basic tuning guidance for the Historical process, see [Basic cluster tuning]
 
 ### HTTP endpoints
 
-For a list of API endpoints supported by the Historical, please see the [API reference](../operations/api-reference.md#historical).
+For a list of API endpoints supported by the Historical, please see the [API reference](../api-reference/api-reference.md#historical).
 
 ### Running
 
diff --git a/docs/design/indexer.md b/docs/design/indexer.md
index fa42912e76..eedf0fc775 100644
--- a/docs/design/indexer.md
+++ b/docs/design/indexer.md
@@ -35,7 +35,7 @@ For Apache Druid Indexer Process Configuration, see [Indexer Configuration](../c
 
 ### HTTP endpoints
 
-The Indexer process shares the same HTTP endpoints as the [MiddleManager](../operations/api-reference.md#middlemanager).
+The Indexer process shares the same HTTP endpoints as the [MiddleManager](../api-reference/api-reference.md#middlemanager).
 
 ### Running
 
diff --git a/docs/design/indexing-service.md b/docs/design/indexing-service.md
index acbf5f9eb0..793c31e81b 100644
--- a/docs/design/indexing-service.md
+++ b/docs/design/indexing-service.md
@@ -30,7 +30,7 @@ Indexing [tasks](../ingestion/tasks.md) are responsible for creating and [killin
 The indexing service is composed of three main components: [Peons](../design/peons.md) that can run a single task, [MiddleManagers](../design/middlemanager.md) that manage Peons, and an [Overlord](../design/overlord.md) that manages task distribution to MiddleManagers.
 Overlords and MiddleManagers may run on the same process or across multiple processes, while MiddleManagers and Peons always run on the same process.
 
-Tasks are managed using API endpoints on the Overlord service. See [Overlord Task API](../operations/api-reference.md#tasks) for more information.
+Tasks are managed using API endpoints on the Overlord service. Please see [Overlord Task API](../api-reference/api-reference.md#tasks) for more information.
 
 ![Indexing Service](../assets/indexing_service.png "Indexing Service")
 
diff --git a/docs/dependencies/metadata-storage.md b/docs/design/metadata-storage.md
similarity index 100%
rename from docs/dependencies/metadata-storage.md
rename to docs/design/metadata-storage.md
diff --git a/docs/design/middlemanager.md b/docs/design/middlemanager.md
index 5cfc29b707..e0096c6b29 100644
--- a/docs/design/middlemanager.md
+++ b/docs/design/middlemanager.md
@@ -31,7 +31,7 @@ For basic tuning guidance for the MiddleManager process, see [Basic cluster tuni
 
 ### HTTP endpoints
 
-For a list of API endpoints supported by the MiddleManager, please see the [API reference](../operations/api-reference.md#middlemanager).
+For a list of API endpoints supported by the MiddleManager, please see the [API reference](../api-reference/api-reference.md#middlemanager).
 
 ### Overview
 
diff --git a/docs/design/overlord.md b/docs/design/overlord.md
index 74c09dd590..7c0ce9ce87 100644
--- a/docs/design/overlord.md
+++ b/docs/design/overlord.md
@@ -31,7 +31,7 @@ For basic tuning guidance for the Overlord process, see [Basic cluster tuning](.
 
 ### HTTP endpoints
 
-For a list of API endpoints supported by the Overlord, please see the [API reference](../operations/api-reference.md#overlord).
+For a list of API endpoints supported by the Overlord, please see the [API reference](../api-reference/api-reference.md#overlord).
 
 ### Overview
 
diff --git a/docs/design/peons.md b/docs/design/peons.md
index 5b2953915f..d413dcb250 100644
--- a/docs/design/peons.md
+++ b/docs/design/peons.md
@@ -31,7 +31,7 @@ For basic tuning guidance for MiddleManager tasks, see [Basic cluster tuning](..
 
 ### HTTP endpoints
 
-For a list of API endpoints supported by the Peon, please see the [Peon API reference](../operations/api-reference.md#peon).
+For a list of API endpoints supported by the Peon, please see the [Peon API reference](../api-reference/api-reference.md#peon).
 
 Peons run a single task in a single JVM. MiddleManager is responsible for creating Peons for running tasks.
 Peons should rarely (if ever for testing purposes) be run on their own.
diff --git a/docs/design/router.md b/docs/design/router.md
index 582e424e6d..726f6831f1 100644
--- a/docs/design/router.md
+++ b/docs/design/router.md
@@ -36,7 +36,7 @@ For basic tuning guidance for the Router process, see [Basic cluster tuning](../
 
 ### HTTP endpoints
 
-For a list of API endpoints supported by the Router, see [Router API](../operations/api-reference.md#router).
+For a list of API endpoints supported by the Router, see [Router API](../api-reference/api-reference.md#router).
 
 ### Running
 
diff --git a/docs/dependencies/zookeeper.md b/docs/design/zookeeper.md
similarity index 100%
rename from docs/dependencies/zookeeper.md
rename to docs/design/zookeeper.md
diff --git a/docs/development/experimental-features.md b/docs/development/experimental-features.md
index 30d8c2f77c..d33f634a4b 100644
--- a/docs/development/experimental-features.md
+++ b/docs/development/experimental-features.md
@@ -32,7 +32,7 @@ Note that this document does not track the status of contrib extensions, all of
 
 - [SQL-based ingestion](../multi-stage-query/index.md)
 - [SQL-based ingestion concepts](../multi-stage-query/concepts.md)
-- [SQL-based ingestion and multi-stage query task API](../multi-stage-query/api.md)
+- [SQL-based ingestion and multi-stage query task API](../api-reference/sql-ingestion-api.md)
 
 ## Indexer process
 
diff --git a/docs/development/extensions-contrib/aliyun-oss-extensions.md b/docs/development/extensions-contrib/aliyun-oss-extensions.md
index f9b0e0e349..ab0573bdc4 100644
--- a/docs/development/extensions-contrib/aliyun-oss-extensions.md
+++ b/docs/development/extensions-contrib/aliyun-oss-extensions.md
@@ -27,7 +27,7 @@ This document describes how to use OSS as Druid deep storage.
 
 ## Installation
 
-Use the [pull-deps](../../operations/pull-deps.md) tool shipped with Druid to install the `aliyun-oss-extensions` extension, as described [here](../../development/extensions.md#community-extensions) on middle manager and historical nodes.
+Use the [pull-deps](../../operations/pull-deps.md) tool shipped with Druid to install the `aliyun-oss-extensions` extension on middle manager and historical nodes, as described [here](../../configuration/extensions.md#community-extensions).
 
 ```bash
 java -classpath "{YOUR_DRUID_DIR}/lib/*" org.apache.druid.cli.Main tools pull-deps -c org.apache.druid.extensions.contrib:aliyun-oss-extensions:{YOUR_DRUID_VERSION}
diff --git a/docs/development/extensions-contrib/ambari-metrics-emitter.md b/docs/development/extensions-contrib/ambari-metrics-emitter.md
index 079d5e84ae..ee82ca6d78 100644
--- a/docs/development/extensions-contrib/ambari-metrics-emitter.md
+++ b/docs/development/extensions-contrib/ambari-metrics-emitter.md
@@ -23,7 +23,7 @@ title: "Ambari Metrics Emitter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `ambari-metrics-emitter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `ambari-metrics-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/development/extensions-contrib/cassandra.md b/docs/development/extensions-contrib/cassandra.md
index 980857f75f..916bacb917 100644
--- a/docs/development/extensions-contrib/cassandra.md
+++ b/docs/development/extensions-contrib/cassandra.md
@@ -23,7 +23,7 @@ title: "Apache Cassandra"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-cassandra-storage` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-cassandra-storage` in the extensions load list.
 
 [Apache Cassandra](http://www.datastax.com/what-we-offer/products-services/datastax-enterprise/apache-cassandra) can also
 be leveraged for deep storage.  This requires some additional Druid configuration as well as setting up the necessary
diff --git a/docs/development/extensions-contrib/cloudfiles.md b/docs/development/extensions-contrib/cloudfiles.md
index 8addd24249..83a1d0c7e1 100644
--- a/docs/development/extensions-contrib/cloudfiles.md
+++ b/docs/development/extensions-contrib/cloudfiles.md
@@ -23,7 +23,7 @@ title: "Rackspace Cloud Files"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-cloudfiles-extensions` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-cloudfiles-extensions` in the extensions load list.
 
 ## Deep Storage
 
diff --git a/docs/development/extensions-contrib/compressed-big-decimal.md b/docs/development/extensions-contrib/compressed-big-decimal.md
index 5c96527493..187d7e45fb 100644
--- a/docs/development/extensions-contrib/compressed-big-decimal.md
+++ b/docs/development/extensions-contrib/compressed-big-decimal.md
@@ -34,7 +34,7 @@ Compressed big decimal is an absolute number based complex type based on big dec
 2. Accuracy: Provides greater level of accuracy in decimal arithmetic
 
 ## Operations
-To use this extension, make sure to [load](../../development/extensions.md#loading-extensions) `compressed-big-decimal` to your config file.
+To use this extension, make sure to [load](../../configuration/extensions.md#loading-extensions) `compressed-big-decimal` in your config file.
 
 ## Configuration
 There are currently no configuration properties specific to Compressed Big Decimal
diff --git a/docs/development/extensions-contrib/distinctcount.md b/docs/development/extensions-contrib/distinctcount.md
index 17954fa4be..38f8e5efba 100644
--- a/docs/development/extensions-contrib/distinctcount.md
+++ b/docs/development/extensions-contrib/distinctcount.md
@@ -23,7 +23,7 @@ title: "DistinctCount Aggregator"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) the `druid-distinctcount` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-distinctcount` in the extensions load list.
 
 Additionally, follow these steps:
 
diff --git a/docs/development/extensions-contrib/gce-extensions.md b/docs/development/extensions-contrib/gce-extensions.md
index 26e7bd4fbd..17a69c72f2 100644
--- a/docs/development/extensions-contrib/gce-extensions.md
+++ b/docs/development/extensions-contrib/gce-extensions.md
@@ -23,7 +23,7 @@ title: "GCE Extensions"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `gce-extensions` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `gce-extensions` in the extensions load list.
 
 At the moment, this extension enables only Druid to autoscale instances in GCE.
 
diff --git a/docs/development/extensions-contrib/graphite.md b/docs/development/extensions-contrib/graphite.md
index d7a024db1c..a6e04e9b00 100644
--- a/docs/development/extensions-contrib/graphite.md
+++ b/docs/development/extensions-contrib/graphite.md
@@ -23,7 +23,7 @@ title: "Graphite Emitter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `graphite-emitter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `graphite-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/development/extensions-contrib/influx.md b/docs/development/extensions-contrib/influx.md
index d0dc6841f0..eec9fb555e 100644
--- a/docs/development/extensions-contrib/influx.md
+++ b/docs/development/extensions-contrib/influx.md
@@ -23,7 +23,7 @@ title: "InfluxDB Line Protocol Parser"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-influx-extensions` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-influx-extensions` in the extensions load list.
 
 This extension enables Druid to parse the [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_tutorial/), a popular text-based timeseries metric serialization format.
 
diff --git a/docs/development/extensions-contrib/influxdb-emitter.md b/docs/development/extensions-contrib/influxdb-emitter.md
index 039b9d185a..1086a5121e 100644
--- a/docs/development/extensions-contrib/influxdb-emitter.md
+++ b/docs/development/extensions-contrib/influxdb-emitter.md
@@ -23,7 +23,7 @@ title: "InfluxDB Emitter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-influxdb-emitter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-influxdb-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/development/extensions-contrib/k8s-jobs.md b/docs/development/extensions-contrib/k8s-jobs.md
index 5cbf4c507b..f3e8d53bb3 100644
--- a/docs/development/extensions-contrib/k8s-jobs.md
+++ b/docs/development/extensions-contrib/k8s-jobs.md
@@ -47,7 +47,7 @@ Task specific pod templates must be specified as the runtime property `druid.ind
 
 ## Configuration
 
-To use this extension please make sure to  [include](../extensions.md#loading-extensions)`druid-kubernetes-overlord-extensions` in the extensions load list for your overlord process.
+To use this extension, make sure to [include](../../configuration/extensions.md#loading-extensions) `druid-kubernetes-overlord-extensions` in the extensions load list for your Overlord process.
 
 The extension uses the task queue to limit how many concurrent tasks (K8s jobs) are in flight, so you must set a reasonable value for `druid.indexer.queue.maxSize`. Additionally, set `druid.indexer.runner.namespace` to the namespace in which you are running Druid.
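
For illustration, the two properties mentioned above might be set as follows; the values are assumptions for the example, not recommendations:

```
druid.indexer.queue.maxSize=10
druid.indexer.runner.namespace=druid
```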
 
diff --git a/docs/development/extensions-contrib/kafka-emitter.md b/docs/development/extensions-contrib/kafka-emitter.md
index 85b8f10a7e..3457c249c7 100644
--- a/docs/development/extensions-contrib/kafka-emitter.md
+++ b/docs/development/extensions-contrib/kafka-emitter.md
@@ -23,7 +23,7 @@ title: "Kafka Emitter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `kafka-emitter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `kafka-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/development/extensions-contrib/momentsketch-quantiles.md b/docs/development/extensions-contrib/momentsketch-quantiles.md
index df7deb0d92..eaad48f69c 100644
--- a/docs/development/extensions-contrib/momentsketch-quantiles.md
+++ b/docs/development/extensions-contrib/momentsketch-quantiles.md
@@ -26,7 +26,7 @@ title: "Moment Sketches for Approximate Quantiles module"
 This module provides aggregators for approximate quantile queries using the [momentsketch](https://github.com/stanford-futuredata/momentsketch) library.
 The momentsketch provides coarse quantile estimates with less space and aggregation time overheads than traditional sketches, approaching the performance of counts and sums by reconstructing distributions from computed statistics.
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-momentsketch` in the extensions load list.
 
 ### Aggregator
 
diff --git a/docs/development/extensions-contrib/moving-average-query.md b/docs/development/extensions-contrib/moving-average-query.md
index aa7fdb80b5..54bf2f3258 100644
--- a/docs/development/extensions-contrib/moving-average-query.md
+++ b/docs/development/extensions-contrib/moving-average-query.md
@@ -52,7 +52,7 @@ It runs the query in two main phases:
 ## Operations
 
 ### Installation
-Use [pull-deps](../../operations/pull-deps.md) tool shipped with Druid to install this [extension](../../development/extensions.md#community-extensions) on all Druid broker and router nodes.
+Use the [pull-deps](../../operations/pull-deps.md) tool shipped with Druid to install this [extension](../../configuration/extensions.md#community-extensions) on all Druid broker and router nodes.
 
 ```bash
 java -classpath "<your_druid_dir>/lib/*" org.apache.druid.cli.Main tools pull-deps -c org.apache.druid.extensions.contrib:druid-moving-average-query:{VERSION}
diff --git a/docs/development/extensions-contrib/opentsdb-emitter.md b/docs/development/extensions-contrib/opentsdb-emitter.md
index 8d102baad8..e13cd5b55f 100644
--- a/docs/development/extensions-contrib/opentsdb-emitter.md
+++ b/docs/development/extensions-contrib/opentsdb-emitter.md
@@ -23,7 +23,7 @@ title: "OpenTSDB Emitter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `opentsdb-emitter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `opentsdb-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/development/extensions-contrib/prometheus.md b/docs/development/extensions-contrib/prometheus.md
index e5625f160b..2612921505 100644
--- a/docs/development/extensions-contrib/prometheus.md
+++ b/docs/development/extensions-contrib/prometheus.md
@@ -23,7 +23,7 @@ title: "Prometheus Emitter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `prometheus-emitter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `prometheus-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/development/extensions-contrib/redis-cache.md b/docs/development/extensions-contrib/redis-cache.md
index 4bd85e9cc5..63e0b9e509 100644
--- a/docs/development/extensions-contrib/redis-cache.md
+++ b/docs/development/extensions-contrib/redis-cache.md
@@ -28,7 +28,7 @@ Below are guidance and configuration options known to this module.
 
 ## Installation
 
-Use [pull-deps](../../operations/pull-deps.md) tool shipped with Druid to install this [extension](../../development/extensions.md#community-extensions) on broker, historical and middle manager nodes.
+Use the [pull-deps](../../operations/pull-deps.md) tool shipped with Druid to install this [extension](../../configuration/extensions.md#community-extensions) on broker, historical, and middle manager nodes.
 
 ```bash
 java -classpath "druid_dir/lib/*" org.apache.druid.cli.Main tools pull-deps -c org.apache.druid.extensions.contrib:druid-redis-cache:{VERSION}
@@ -38,7 +38,7 @@ java -classpath "druid_dir/lib/*" org.apache.druid.cli.Main tools pull-deps -c o
 
 To enable this extension after installation,
 
-1. [include](../../development/extensions.md#loading-extensions) this `druid-redis-cache` extension
+1. [include](../../configuration/extensions.md#loading-extensions) this `druid-redis-cache` extension
 2. to enable cache on broker nodes, follow [broker caching docs](../../configuration/index.md#broker-caching) to set related properties
 3. to enable cache on historical nodes, follow [historical caching docs](../../configuration/index.md#historical-caching) to set related properties
 4. to enable cache on middle manager nodes, follow [peon caching docs](../../configuration/index.md#peon-caching) to set related properties
diff --git a/docs/development/extensions-contrib/sqlserver.md b/docs/development/extensions-contrib/sqlserver.md
index 482715176c..0f2e8de24e 100644
--- a/docs/development/extensions-contrib/sqlserver.md
+++ b/docs/development/extensions-contrib/sqlserver.md
@@ -23,7 +23,7 @@ title: "Microsoft SQLServer"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `sqlserver-metadata-storage` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `sqlserver-metadata-storage` in the extensions load list.
 
 ## Setting up SQLServer
 
diff --git a/docs/development/extensions-contrib/statsd.md b/docs/development/extensions-contrib/statsd.md
index 61ff45f09c..5ad705a31f 100644
--- a/docs/development/extensions-contrib/statsd.md
+++ b/docs/development/extensions-contrib/statsd.md
@@ -23,7 +23,7 @@ title: "StatsD Emitter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `statsd-emitter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `statsd-emitter` in the extensions load list.
 
 ## Introduction
 
diff --git a/docs/development/extensions-contrib/tdigestsketch-quantiles.md b/docs/development/extensions-contrib/tdigestsketch-quantiles.md
index 705bbc2edb..59b5a851c1 100644
--- a/docs/development/extensions-contrib/tdigestsketch-quantiles.md
+++ b/docs/development/extensions-contrib/tdigestsketch-quantiles.md
@@ -35,7 +35,7 @@ to generate sketches during ingestion time itself and then combining them during
 The module also provides a postAggregator, quantilesFromTDigestSketch, that can be used to compute approximate 
 quantiles from T-Digest sketches generated by the tDigestSketch aggregator.
 
-To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
+To use this aggregator, make sure you [include](../../configuration/extensions.md#loading-extensions) the extension in your config file:
 
 ```
 druid.extensions.loadList=["druid-tdigestsketch"]
diff --git a/docs/development/extensions-contrib/thrift.md b/docs/development/extensions-contrib/thrift.md
index 70dbd4e3e8..3148982709 100644
--- a/docs/development/extensions-contrib/thrift.md
+++ b/docs/development/extensions-contrib/thrift.md
@@ -23,7 +23,7 @@ title: "Thrift"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-thrift-extensions` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-thrift-extensions` in the extensions load list.
 
 This extension enables Druid to ingest thrift compact data online (`ByteBuffer`) and offline (SequenceFile of type `<Writable, BytesWritable>` or LzoThriftBlock File).
 
diff --git a/docs/development/extensions-contrib/time-min-max.md b/docs/development/extensions-contrib/time-min-max.md
index 7d5588a0bb..f83667baea 100644
--- a/docs/development/extensions-contrib/time-min-max.md
+++ b/docs/development/extensions-contrib/time-min-max.md
@@ -23,7 +23,7 @@ title: "Timestamp Min/Max aggregators"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-time-min-max` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-time-min-max` in the extensions load list.
 
 These aggregators enable more precise calculation of the minimum and maximum time of events than the `__time` column, whose granularity is sparse (the same as the query granularity).
 To use this feature, a "timeMin" or "timeMax" aggregator must be included at indexing time.
diff --git a/docs/development/extensions-core/approximate-histograms.md b/docs/development/extensions-core/approximate-histograms.md
index 08dd753353..7e24f958d4 100644
--- a/docs/development/extensions-core/approximate-histograms.md
+++ b/docs/development/extensions-core/approximate-histograms.md
@@ -23,7 +23,7 @@ title: "Approximate Histogram aggregators"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-histogram` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-histogram` in the extensions load list.
 
 The `druid-histogram` extension provides an approximate histogram aggregator and a fixed buckets histogram aggregator.
 
diff --git a/docs/development/extensions-core/avro.md b/docs/development/extensions-core/avro.md
index ac1b7ef51c..7db7530b07 100644
--- a/docs/development/extensions-core/avro.md
+++ b/docs/development/extensions-core/avro.md
@@ -31,7 +31,7 @@ The [Avro Stream Parser](../../ingestion/data-formats.md#avro-stream-parser) is
 
 ## Load the Avro extension
 
-To use the Avro extension, add the `druid-avro-extensions` to the list of loaded extensions. See [Loading extensions](../../development/extensions.md#loading-extensions) for more information.
+To use the Avro extension, add the `druid-avro-extensions` to the list of loaded extensions. See [Loading extensions](../../configuration/extensions.md#loading-extensions) for more information.
 
 ## Avro types
 
diff --git a/docs/development/extensions-core/azure.md b/docs/development/extensions-core/azure.md
index d63e74d865..c6a1c39790 100644
--- a/docs/development/extensions-core/azure.md
+++ b/docs/development/extensions-core/azure.md
@@ -23,7 +23,7 @@ title: "Microsoft Azure"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-azure-extensions` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-azure-extensions` in the extensions load list.
 
 ## Deep Storage
 
diff --git a/docs/development/extensions-core/bloom-filter.md b/docs/development/extensions-core/bloom-filter.md
index 0befa1418f..30cebeef6c 100644
--- a/docs/development/extensions-core/bloom-filter.md
+++ b/docs/development/extensions-core/bloom-filter.md
@@ -23,7 +23,7 @@ title: "Bloom Filter"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-bloom-filter` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-bloom-filter` in the extensions load list.
 
 This extension adds the ability to both construct bloom filters from query results, and filter query results by testing
 against a bloom filter. A Bloom filter is a probabilistic data structure for performing a set membership check. A bloom
@@ -98,7 +98,7 @@ SELECT COUNT(*) FROM druid.foo WHERE bloom_filter_test(<expr>, '<serialized_byte
 
 ### Expression and Virtual Column Support
 
-The bloom filter extension also adds a bloom filter [Druid expression](../../misc/math-expr.md) which shares syntax
+The bloom filter extension also adds a bloom filter [Druid expression](../../querying/math-expr.md) which shares syntax
 with the SQL operator.
 
 ```sql
diff --git a/docs/development/extensions-core/datasketches-extension.md b/docs/development/extensions-core/datasketches-extension.md
index c05806d901..00c955dc98 100644
--- a/docs/development/extensions-core/datasketches-extension.md
+++ b/docs/development/extensions-core/datasketches-extension.md
@@ -25,7 +25,7 @@ title: "DataSketches extension"
 
 Apache Druid aggregators based on the [Apache DataSketches](https://datasketches.apache.org/) library. Sketches are data structures implementing approximate streaming mergeable algorithms. Sketches can be ingested from outside of Druid or built from raw data at ingestion time. Sketches can be stored in Druid segments as additive metrics.
 
-To use the datasketches aggregators, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
+To use the datasketches aggregators, make sure you [include](../../configuration/extensions.md#loading-extensions) the extension in your config file:
 
 ```
 druid.extensions.loadList=["druid-datasketches"]
diff --git a/docs/development/extensions-core/datasketches-hll.md b/docs/development/extensions-core/datasketches-hll.md
index 0fec926993..86c0724665 100644
--- a/docs/development/extensions-core/datasketches-hll.md
+++ b/docs/development/extensions-core/datasketches-hll.md
@@ -27,7 +27,7 @@ This module provides Apache Druid aggregators for distinct counting based on HLL
 the estimate of the number of distinct values presented to the sketch. You can also use post aggregators to produce a union of sketch columns in the same row.
 You can use the HLL sketch aggregator on any column to estimate its cardinality.
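
As a sketch, an ingestion-time HLL sketch aggregator over a hypothetical `user_id` column might look like this:

```json
{ "type": "HLLSketchBuild", "name": "user_id_hll", "fieldName": "user_id" }
```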
 
-To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
+To use this aggregator, make sure you [include](../../configuration/extensions.md#loading-extensions) the extension in your config file:
 
 ```
 druid.extensions.loadList=["druid-datasketches"]
diff --git a/docs/development/extensions-core/datasketches-kll.md b/docs/development/extensions-core/datasketches-kll.md
index 5d51b61221..5245f816d8 100644
--- a/docs/development/extensions-core/datasketches-kll.md
+++ b/docs/development/extensions-core/datasketches-kll.md
@@ -31,7 +31,7 @@ There are three major modes of operation:
 2. Building sketches from raw data during ingestion
 3. Building sketches from raw data at query time
 
-To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
+To use this aggregator, make sure you [include](../../configuration/extensions.md#loading-extensions) the extension in your config file:
 
 ```
 druid.extensions.loadList=["druid-datasketches"]
diff --git a/docs/development/extensions-core/datasketches-quantiles.md b/docs/development/extensions-core/datasketches-quantiles.md
index 7a512f1bc0..6f1962dd5e 100644
--- a/docs/development/extensions-core/datasketches-quantiles.md
+++ b/docs/development/extensions-core/datasketches-quantiles.md
@@ -31,7 +31,7 @@ There are three major modes of operation:
 2. Building sketches from raw data during ingestion
 3. Building sketches from raw data at query time
 
-To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
+To use this aggregator, make sure you [include](../../configuration/extensions.md#loading-extensions) the extension in your config file:
 
 ```
 druid.extensions.loadList=["druid-datasketches"]
diff --git a/docs/development/extensions-core/datasketches-theta.md b/docs/development/extensions-core/datasketches-theta.md
index bd46c4362e..844fdf35b1 100644
--- a/docs/development/extensions-core/datasketches-theta.md
+++ b/docs/development/extensions-core/datasketches-theta.md
@@ -30,7 +30,7 @@ At ingestion time, the Theta sketch aggregator creates Theta sketch objects whic
 
 Note that you can use the `thetaSketch` aggregator on columns that were not ingested using it; it returns the estimated cardinality of the column. It is still recommended to use the aggregator at ingestion time to make querying faster.
 
-To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
+To use this aggregator, make sure you [include](../../configuration/extensions.md#loading-extensions) the extension in your config file:
 
 ```
 druid.extensions.loadList=["druid-datasketches"]
diff --git a/docs/development/extensions-core/datasketches-tuple.md b/docs/development/extensions-core/datasketches-tuple.md
index 22622f187b..1dcf76c0b9 100644
--- a/docs/development/extensions-core/datasketches-tuple.md
+++ b/docs/development/extensions-core/datasketches-tuple.md
@@ -25,7 +25,7 @@ title: "DataSketches Tuple Sketch module"
 
 This module provides Apache Druid aggregators based on the Tuple sketch from the [Apache DataSketches](https://datasketches.apache.org/) library. ArrayOfDoublesSketch sketches extend the functionality of the count-distinct Theta sketches by adding arrays of double values associated with unique keys.
 
-To use this aggregator, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
+To use this aggregator, make sure you [include](../../configuration/extensions.md#loading-extensions) the extension in your config file:
 
 ```
 druid.extensions.loadList=["druid-datasketches"]
diff --git a/docs/development/extensions-core/druid-aws-rds.md b/docs/development/extensions-core/druid-aws-rds.md
index 6bb0cd826b..48c4ba1747 100644
--- a/docs/development/extensions-core/druid-aws-rds.md
+++ b/docs/development/extensions-core/druid-aws-rds.md
@@ -31,7 +31,7 @@ title: "Druid AWS RDS Module"
 Before using this password provider, make sure that you have completed the required setup so that the database user can connect using a token.
 See [AWS Guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html).
 
-To use this extension, make sure you [include](../../development/extensions.md#loading-extensions) it in your config file along with other extensions e.g.
+To use this extension, make sure you [include](../../configuration/extensions.md#loading-extensions) it in your config file along with other extensions, for example:
 
 ```
 druid.extensions.loadList=["druid-aws-rds-extensions", "postgresql-metadata-storage", ...]
diff --git a/docs/development/extensions-core/druid-basic-security.md b/docs/development/extensions-core/druid-basic-security.md
index 1732ba19ac..d156c5a6b9 100644
--- a/docs/development/extensions-core/druid-basic-security.md
+++ b/docs/development/extensions-core/druid-basic-security.md
@@ -29,7 +29,7 @@ The Basic Security extension for Apache Druid adds:
 - an Escalator which determines the authentication scheme for internal Druid processes.
 - an Authorizer which implements basic role-based access control for Druid metadata store or LDAP users and groups.
 
-To load the extension, [include](../../development/extensions.md#loading-extensions) `druid-basic-security` in the `druid.extensions.loadList` in your `common.runtime.properties`. For example:
+To load the extension, [include](../../configuration/extensions.md#loading-extensions) `druid-basic-security` in the `druid.extensions.loadList` in your `common.runtime.properties`. For example:
 ```
 druid.extensions.loadList=["postgresql-metadata-storage", "druid-hdfs-storage", "druid-basic-security"]
 ```
@@ -37,7 +37,7 @@ druid.extensions.loadList=["postgresql-metadata-storage", "druid-hdfs-storage",
 To enable basic auth, configure the basic Authenticator, Escalator, and Authorizer in `common.runtime.properties`.
 See [Security overview](../../operations/security-overview.md#enable-an-authenticator) for an example configuration for HTTP basic authentication.
 
-Visit [Authentication and Authorization](../../design/auth.md) for more information on the implemented extension interfaces and for an example configuration.
+Visit [Authentication and Authorization](../../operations/auth.md) for more information on the implemented extension interfaces and for an example configuration.
 
 ## Configuration
 
diff --git a/docs/development/extensions-core/druid-kerberos.md b/docs/development/extensions-core/druid-kerberos.md
index bb0fbb1158..c29acdea7a 100644
--- a/docs/development/extensions-core/druid-kerberos.md
+++ b/docs/development/extensions-core/druid-kerberos.md
@@ -25,7 +25,7 @@ title: "Kerberos"
 
 Apache Druid Extension to enable Authentication for Druid Processes using Kerberos.
 This extension adds an Authenticator that protects HTTP endpoints using the Simple and Protected GSSAPI Negotiation Mechanism ([SPNEGO](https://en.wikipedia.org/wiki/SPNEGO)).
-Make sure to [include](../../development/extensions.md#loading-extensions) `druid-kerberos` in the extensions load list.
+Make sure to [include](../../configuration/extensions.md#loading-extensions) `druid-kerberos` in the extensions load list.
 
 
 ## Configuration
@@ -61,7 +61,7 @@ The special string _HOST will be replaced automatically with the value of config
 
 ### `druid.auth.authenticator.kerberos.excludedPaths`
 
-In older releases, the Kerberos authenticator had an `excludedPaths` property that allowed the user to specify a list of paths where authentication checks should be skipped. This property has been removed from the Kerberos authenticator because the path exclusion functionality is now handled across all authenticators/authorizers by setting `druid.auth.unsecuredPaths`, as described in the [main auth documentation](../../design/auth.md).
+In older releases, the Kerberos authenticator had an `excludedPaths` property that allowed the user to specify a list of paths where authentication checks should be skipped. This property has been removed from the Kerberos authenticator because the path exclusion functionality is now handled across all authenticators/authorizers by setting `druid.auth.unsecuredPaths`, as described in the [main auth documentation](../../operations/auth.md).
 
 ### Auth to Local Syntax
 `druid.auth.authenticator.kerberos.authToLocal` allows you to set general rules for mapping principal names to local user names.
diff --git a/docs/development/extensions-core/druid-lookups.md b/docs/development/extensions-core/druid-lookups.md
index 5b19508c23..3699f94345 100644
--- a/docs/development/extensions-core/druid-lookups.md
+++ b/docs/development/extensions-core/druid-lookups.md
@@ -28,7 +28,7 @@ The main goal of this cache is to speed up the access to a high latency lookup s
 Thus, users can define various caching strategies, or an implementation per lookup, even if the source is the same.
 This module can be used side by side with other lookup modules, such as the global cached lookup module.
 
-To use this Apache Druid extension, [include](../extensions.md#loading-extensions) `druid-lookups-cached-single` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-lookups-cached-single` in the extensions load list.
 
 > If using JDBC, you will need to add your database's client JAR files to the extension's directory.
 > For Postgres, the connector JAR is already included.
diff --git a/docs/development/extensions-core/druid-ranger-security.md b/docs/development/extensions-core/druid-ranger-security.md
index 481fb56adf..8c2b3b3653 100644
--- a/docs/development/extensions-core/druid-ranger-security.md
+++ b/docs/development/extensions-core/druid-ranger-security.md
@@ -22,9 +22,9 @@ title: "Apache Ranger Security"
   ~ under the License.
   -->
   
-This Apache Druid extension adds an Authorizer which implements access control for Druid, backed by [Apache Ranger](https://ranger.apache.org/). Please see [Authentication and Authorization](../../design/auth.md) for more information on the basic facilities this extension provides.
+This Apache Druid extension adds an Authorizer which implements access control for Druid, backed by [Apache Ranger](https://ranger.apache.org/). Please see [Authentication and Authorization](../../operations/auth.md) for more information on the basic facilities this extension provides.
 
-Make sure to [include](../../development/extensions.md#loading-extensions) `druid-ranger-security` in the extensions load list.
+Make sure to [include](../../configuration/extensions.md#loading-extensions) `druid-ranger-security` in the extensions load list.
 
 > The latest release of Apache Ranger is, at the time of writing, version 2.0. This version has a dependency on `log4j 1.2.17`, which has a vulnerability if you configure it to use a `SocketServer` (CVE-2019-17571). It also includes Kafka 2.0.0, which has two known vulnerabilities (CVE-2019-12399, CVE-2018-17196). Kafka can be used by the audit component in Ranger, but is not required.
 
@@ -98,7 +98,7 @@ When installing a new Druid service in Apache Ranger for the first time, Ranger
 
 ### HTTP methods
 
-For information on what HTTP methods are supported for a particular request endpoint, please refer to the [API documentation](../../operations/api-reference.md).
+For information on what HTTP methods are supported for a particular request endpoint, please refer to the [API documentation](../../api-reference/api-reference.md).
 
 GET requires READ permission, while POST and DELETE require WRITE permission.
 
diff --git a/docs/development/extensions-core/google.md b/docs/development/extensions-core/google.md
index 813f9827e9..6df933f2da 100644
--- a/docs/development/extensions-core/google.md
+++ b/docs/development/extensions-core/google.md
@@ -28,7 +28,7 @@ This extension allows you to do 2 things:
 * [Ingest data](#reading-data-from-google-cloud-storage) from files stored in Google Cloud Storage.
 * Write segments to [deep storage](#deep-storage) in GCS.
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-google-extensions` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-google-extensions` in the extensions load list.
 
 ### Required Configuration
 
@@ -36,7 +36,7 @@ To configure connectivity to google cloud, run druid processes with `GOOGLE_APPL
 
 ### Reading data from Google Cloud Storage
 
-The [Google Cloud Storage input source](../../ingestion/native-batch-input-source.md) is supported by the [Parallel task](../../ingestion/native-batch.md)
+The [Google Cloud Storage input source](../../ingestion/input-sources.md) is supported by the [Parallel task](../../ingestion/native-batch.md)
 to read objects directly from Google Cloud Storage. If you use the [Hadoop task](../../ingestion/hadoop.md),
 you can read data from Google Cloud Storage by specifying the paths in your [`inputSpec`](../../ingestion/hadoop.md#inputspec).
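
For example, a Google Cloud Storage input source referencing a hypothetical bucket and object might look like this:

```json
"inputSource": {
  "type": "google",
  "uris": ["gs://example-bucket/path/to/file.json"]
}
```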
 
diff --git a/docs/development/extensions-core/hdfs.md b/docs/development/extensions-core/hdfs.md
index a49041b245..edc3fdb04c 100644
--- a/docs/development/extensions-core/hdfs.md
+++ b/docs/development/extensions-core/hdfs.md
@@ -23,7 +23,7 @@ title: "HDFS"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-hdfs-storage` in the extensions load list and run druid processes with `GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account_keyfile` in the environment.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-hdfs-storage` in the extensions load list and run druid processes with `GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account_keyfile` in the environment.
 
 ## Deep Storage
 
@@ -153,12 +153,12 @@ Tested with Druid 0.17.0, Hadoop 2.8.5 and gcs-connector jar 2.0.0-hadoop2.
 
 ### Native batch ingestion
 
-The [HDFS input source](../../ingestion/native-batch-input-source.md#hdfs-input-source) is supported by the [Parallel task](../../ingestion/native-batch.md)
+The [HDFS input source](../../ingestion/input-sources.md#hdfs-input-source) is supported by the [Parallel task](../../ingestion/native-batch.md)
 to read files directly from HDFS storage. You may be able to read objects from cloud storage
 with the HDFS input source, but we highly recommend using a proper
-[Input Source](../../ingestion/native-batch-input-source.md) instead if possible because
-it is simple to set up. For now, only the [S3 input source](../../ingestion/native-batch-input-source.md#s3-input-source)
-and the [Google Cloud Storage input source](../../ingestion/native-batch-input-source.md#google-cloud-storage-input-source)
+[Input Source](../../ingestion/input-sources.md) instead if possible because
+it is simple to set up. For now, only the [S3 input source](../../ingestion/input-sources.md#s3-input-source)
+and the [Google Cloud Storage input source](../../ingestion/input-sources.md#google-cloud-storage-input-source)
 are supported for cloud storage types, and so you may still want to use the HDFS input source
 to read from cloud storage other than those two.
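
For reference alongside the links above, a minimal HDFS input source sketch inside a parallel task `ioConfig` might look like this; the path is a placeholder:

```json
"ioConfig": {
  "type": "index_parallel",
  "inputSource": {
    "type": "hdfs",
    "paths": "hdfs://namenode:8020/druid/input/*.json"
  },
  "inputFormat": { "type": "json" }
}
```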
 
diff --git a/docs/development/extensions-core/kafka-extraction-namespace.md b/docs/development/extensions-core/kafka-extraction-namespace.md
index 0efbf7b815..2d841dfc94 100644
--- a/docs/development/extensions-core/kafka-extraction-namespace.md
+++ b/docs/development/extensions-core/kafka-extraction-namespace.md
@@ -22,7 +22,7 @@ title: "Apache Kafka Lookups"
   ~ under the License.
   -->
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` and `druid-kafka-extraction-namespace` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-lookups-cached-global` and `druid-kafka-extraction-namespace` in the extensions load list.
 
 If you need updates to populate as promptly as possible, you can plug a LookupExtractorFactory into a Kafka topic whose key is the old value and whose message is the desired new value (both in UTF-8).
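
As a hedged sketch of such a lookup, the extractor factory might be configured roughly as follows; the topic name and broker address are placeholders, and the authoritative field list is in the rest of this page:

```json
{
  "type": "kafka",
  "kafkaTopic": "lookup-topic",
  "kafkaProperties": { "bootstrap.servers": "kafka01.example.com:9092" }
}
```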
 
diff --git a/docs/development/extensions-core/kafka-ingestion.md b/docs/development/extensions-core/kafka-ingestion.md
index fd7f7b58ce..7a4b49f173 100644
--- a/docs/development/extensions-core/kafka-ingestion.md
+++ b/docs/development/extensions-core/kafka-ingestion.md
@@ -49,7 +49,7 @@ If your Kafka cluster enables consumer-group based ACLs, you can set `group.id`
 
 ## Load the Kafka indexing service
 
-To use the Kafka indexing service, load the `druid-kafka-indexing-service` extension on both the Overlord and the MiddleManagers. See [Loading extensions](../extensions.md#loading-extensions) for instructions on how to configure extensions.
+To use the Kafka indexing service, load the `druid-kafka-indexing-service` extension on both the Overlord and the MiddleManagers. See [Loading extensions](../../configuration/extensions.md#loading-extensions) for instructions on how to configure extensions.
 
 ## Define a supervisor spec
 
diff --git a/docs/development/extensions-core/kafka-supervisor-operations.md b/docs/development/extensions-core/kafka-supervisor-operations.md
index fe8d1f562b..dbfa05174f 100644
--- a/docs/development/extensions-core/kafka-supervisor-operations.md
+++ b/docs/development/extensions-core/kafka-supervisor-operations.md
@@ -25,7 +25,7 @@ description: "Reference topic for running and maintaining Apache Kafka superviso
   -->
 This topic contains operations reference information to run and maintain Apache Kafka supervisors for Apache Druid. It includes descriptions of how some supervisor APIs work within Kafka Indexing Service.
 
-For all supervisor APIs, see [Supervisor APIs](../../operations/api-reference.md#supervisors).
+For all supervisor APIs, see [Supervisor APIs](../../api-reference/api-reference.md#supervisors).
 
 ## Getting Supervisor Status Report
 
diff --git a/docs/development/extensions-core/kafka-supervisor-reference.md b/docs/development/extensions-core/kafka-supervisor-reference.md
index b410d6f5b2..cf44be7bfd 100644
--- a/docs/development/extensions-core/kafka-supervisor-reference.md
+++ b/docs/development/extensions-core/kafka-supervisor-reference.md
@@ -205,7 +205,7 @@ The `tuningConfig` is optional and default parameters will be used if no `tuning
 | `indexSpecForIntermediatePersists`|                | Defines segment storage format options to be used at indexing time for intermediate persisted temporary segments. This can be used to disable dimension/metric compression on intermediate segments to reduce memory required for final merging. However, disabling compression on intermediate segments might increase page cache use while they are used before getting merged into final segment published, see [IndexSpec](#indexspec) for possib [...]
 | `reportParseExceptions`           | Boolean        | *DEPRECATED*. If true, exceptions encountered during parsing will be thrown and will halt ingestion; if false, unparseable rows and fields will be skipped. Setting `reportParseExceptions` to true will override existing configurations for `maxParseExceptions` and `maxSavedParseExceptions`, setting `maxParseExceptions` to 0 and limiting `maxSavedParseExceptions` to no more than 1.                                                         [...]
 | `handoffConditionTimeout`         | Long           | Milliseconds to wait for segment handoff. It must be >= 0, where 0 means to wait forever.                                                                                                                                                                                                                                                                                                                                                             [...]
-| `resetOffsetAutomatically`        | Boolean        | Controls behavior when Druid needs to read Kafka messages that are no longer available (i.e. when `OffsetOutOfRangeException` is encountered).<br/><br/>If false, the exception will bubble up, which will cause your tasks to fail and ingestion to halt. If this occurs, manual intervention is required to correct the situation; potentially using the [Reset Supervisor API](../../operations/api-reference.md#supervisors). This mode is useful [...]
+| `resetOffsetAutomatically`        | Boolean        | Controls behavior when Druid needs to read Kafka messages that are no longer available (i.e. when `OffsetOutOfRangeException` is encountered).<br/><br/>If false, the exception will bubble up, which will cause your tasks to fail and ingestion to halt. If this occurs, manual intervention is required to correct the situation; potentially using the [Reset Supervisor API](../../api-reference/api-reference.md#supervisors). This mode is use [...]
 | `workerThreads`                   | Integer        | The number of threads that the supervisor uses to handle requests/responses for worker tasks, along with any other internal asynchronous operation.                                                                                                                                                                                                                                                                                                   [...]
 | `chatAsync`                       | Boolean        | If true, use asynchronous communication with indexing tasks, and ignore the `chatThreads` parameter. If false, use synchronous communication in a thread pool of size `chatThreads`.                                                                                                                                                                                                                                                                  [...]
 | `chatThreads`                     | Integer        | The number of threads that will be used for communicating with indexing tasks. Ignored if `chatAsync` is `true` (the default).                                                                                                                                                                                                                                                                                                                        [...]
@@ -217,7 +217,7 @@ The `tuningConfig` is optional and default parameters will be used if no `tuning
 | `intermediateHandoffPeriod`       | ISO8601 Period | How often the tasks should hand off segments. Handoff will happen either if `maxRowsPerSegment` or `maxTotalRows` is hit or every `intermediateHandoffPeriod`, whichever happens earlier.                                                                                                                                                                                                                                                             [...]
 | `logParseExceptions`              | Boolean        | If true, log an error message when a parsing exception occurs, containing information about the row where the error occurred.                                                                                                                                                                                                                                                                                                                         [...]
 | `maxParseExceptions`              | Integer        | The maximum number of parse exceptions that can occur before the task halts ingestion and fails. Overridden if `reportParseExceptions` is set.                                                                                                                                                                                                                                                                                                        [...]
-| `maxSavedParseExceptions`         | Integer        | When a parse exception occurs, Druid can keep track of the most recent parse exceptions. `maxSavedParseExceptions` limits how many exception instances will be saved. These saved exceptions will be made available after the task finishes in the [task completion report](../../ingestion/tasks.md#reports). Overridden if `reportParseExceptions` is set.                                                                                          [...]
+| `maxSavedParseExceptions`         | Integer        | When a parse exception occurs, Druid can keep track of the most recent parse exceptions. `maxSavedParseExceptions` limits how many exception instances will be saved. These saved exceptions will be made available after the task finishes in the [task completion report](../../ingestion/tasks.md#task-reports). Overridden if `reportParseExceptions` is set.                                                                                     [...]
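
To illustrate the parameters above with concrete values, a partial `tuningConfig` sketch might look like the following; the numeric limits are illustrative only:

```json
"tuningConfig": {
  "type": "kafka",
  "resetOffsetAutomatically": false,
  "logParseExceptions": true,
  "maxParseExceptions": 100,
  "maxSavedParseExceptions": 10
}
```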
 
 #### IndexSpec
 
diff --git a/docs/development/extensions-core/kinesis-ingestion.md b/docs/development/extensions-core/kinesis-ingestion.md
index 57457992c3..046ffd2ad6 100644
--- a/docs/development/extensions-core/kinesis-ingestion.md
+++ b/docs/development/extensions-core/kinesis-ingestion.md
@@ -30,7 +30,7 @@ When you enable the Kinesis indexing service, you can configure *supervisors* on
 
 
 To use the Kinesis indexing service, load the `druid-kinesis-indexing-service` core Apache Druid extension (see
-[Including Extensions](../../development/extensions.md#loading-extensions)).
+[Including Extensions](../../configuration/extensions.md#loading-extensions)).
 
 > Before you deploy the Kinesis extension to production, read the [Kinesis known issues](#kinesis-known-issues).
 
@@ -284,7 +284,7 @@ The `tuningConfig` is optional. If no `tuningConfig` is specified, default param
 |`indexSpecForIntermediatePersists`|Object|Defines segment storage format options to be used at indexing time for intermediate persisted temporary segments. This can be used to disable dimension/metric compression on intermediate segments to reduce memory required for final merging. However, disabling compression on intermediate segments might increase page cache use while they are used before getting merged into final segment published, see [IndexSpec](#indexspec) for possible values.|  [...]
 |`reportParseExceptions`|Boolean|If true, exceptions encountered during parsing will be thrown and will halt ingestion; if false, unparseable rows and fields will be skipped.|no (default == false)|
 |`handoffConditionTimeout`|Long| Milliseconds to wait for segment handoff. It must be >= 0, where 0 means to wait forever.| no (default == 0)|
-|`resetOffsetAutomatically`|Boolean|Controls behavior when Druid needs to read Kinesis messages that are no longer available.<br/><br/>If false, the exception will bubble up, which will cause your tasks to fail and ingestion to halt. If this occurs, manual intervention is required to correct the situation; potentially using the [Reset Supervisor API](../../operations/api-reference.md#supervisors). This mode is useful for production, since it will make you aware of issues with ingestion.< [...]
+|`resetOffsetAutomatically`|Boolean|Controls behavior when Druid needs to read Kinesis messages that are no longer available.<br/><br/>If false, the exception will bubble up, which will cause your tasks to fail and ingestion to halt. If this occurs, manual intervention is required to correct the situation; potentially using the [Reset Supervisor API](../../api-reference/api-reference.md#supervisors). This mode is useful for production, since it will make you aware of issues with ingestio [...]
 |`skipSequenceNumberAvailabilityCheck`|Boolean|Whether to enable checking if the current sequence number is still available in a particular Kinesis shard. If set to false, the indexing task will attempt to reset the current sequence number (or not), depending on the value of `resetOffsetAutomatically`.|no (default == false)|
 |`workerThreads`|Integer|The number of threads that the supervisor uses to handle requests/responses for worker tasks, along with any other internal asynchronous operation.|no (default == min(10, taskCount))|
 |`chatAsync`|Boolean| If true, use asynchronous communication with indexing tasks, and ignore the `chatThreads` parameter. If false, use synchronous communication in a thread pool of size `chatThreads`.                                                                                                                                                                                                                                                                                                   [...]
@@ -338,7 +338,7 @@ For Concise bitmaps:
 ## Operations
 
 This section describes how some supervisor APIs work in Kinesis Indexing Service.
-For all supervisor APIs, check [Supervisor APIs](../../operations/api-reference.md#supervisors).
+For all supervisor APIs, see [Supervisor APIs](../../api-reference/api-reference.md#supervisors).
 
 ### AWS Authentication
 
diff --git a/docs/development/extensions-core/kubernetes.md b/docs/development/extensions-core/kubernetes.md
index c789a423d9..600c3ada21 100644
--- a/docs/development/extensions-core/kubernetes.md
+++ b/docs/development/extensions-core/kubernetes.md
@@ -29,7 +29,7 @@ Apache Druid Extension to enable using Kubernetes API Server for node discovery
 
 ## Configuration
 
-To use this extension please make sure to  [include](../../development/extensions.md#loading-extensions) `druid-kubernetes-extensions` in the extensions load list.
+To use this extension, please make sure to [include](../../configuration/extensions.md#loading-extensions) `druid-kubernetes-extensions` in the extensions load list.
 
 This extension works together with HTTP-based segment and task management in Druid. Consequently, the following configurations must be set on all Druid nodes.
 
diff --git a/docs/development/extensions-core/lookups-cached-global.md b/docs/development/extensions-core/lookups-cached-global.md
index 5842d3dea0..7e9d80d7ec 100644
--- a/docs/development/extensions-core/lookups-cached-global.md
+++ b/docs/development/extensions-core/lookups-cached-global.md
@@ -22,7 +22,7 @@ title: "Globally Cached Lookups"
   ~ under the License.
   -->
 
-To use this Apache Druid extension, [include](../extensions.md#loading-extensions) `druid-lookups-cached-global` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-lookups-cached-global` in the extensions load list.
 
 ## Configuration
 > Static configuration is no longer supported. Lookups can be configured through
@@ -168,7 +168,7 @@ It's highly recommended that `druid.lookup.namespace.numBufferedEntries` is set
 
 ## Supported lookups
 
-For additional lookups, please see our [extensions list](../extensions.md).
+For additional lookups, please see our [extensions list](../../configuration/extensions.md).
 
 ### URI lookup
 
diff --git a/docs/development/extensions-core/mysql.md b/docs/development/extensions-core/mysql.md
index f7c300c16a..5e08c7f5f3 100644
--- a/docs/development/extensions-core/mysql.md
+++ b/docs/development/extensions-core/mysql.md
@@ -23,7 +23,7 @@ title: "MySQL Metadata Store"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `mysql-metadata-storage` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `mysql-metadata-storage` in the extensions load list.
 
 > The MySQL extension requires the MySQL Connector/J library or the MariaDB Connector/J library, neither of which is included in the Druid distribution.
 > Refer to the following section for instructions on how to install this library.
diff --git a/docs/development/extensions-core/orc.md b/docs/development/extensions-core/orc.md
index e358dc89d4..4be5867409 100644
--- a/docs/development/extensions-core/orc.md
+++ b/docs/development/extensions-core/orc.md
@@ -30,7 +30,7 @@ The extension provides the [ORC input format](../../ingestion/data-formats.md#or
 for [native batch ingestion](../../ingestion/native-batch.md) and [Hadoop batch ingestion](../../ingestion/hadoop.md), respectively.
 Please see corresponding docs for details.
 
-To use this extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-orc-extensions` in the extensions load list.
+To use this extension, make sure to [include](../../configuration/extensions.md#loading-extensions) `druid-orc-extensions` in the extensions load list.
 
 ### Migration from 'contrib' extension
 This extension, first available in version 0.15.0, replaces the previous 'contrib' extension which was available until
diff --git a/docs/development/extensions-core/parquet.md b/docs/development/extensions-core/parquet.md
index 614e5dcd23..a655c8989c 100644
--- a/docs/development/extensions-core/parquet.md
+++ b/docs/development/extensions-core/parquet.md
@@ -27,7 +27,7 @@ This Apache Druid module extends [Druid Hadoop based indexing](../../ingestion/h
 Apache Parquet files.
 
 Note: If using the `parquet-avro` parser for Apache Hadoop based indexing, `druid-parquet-extensions` depends on the `druid-avro-extensions` module, so be sure to
- [include  both](../../development/extensions.md#loading-extensions).
+ [include both](../../configuration/extensions.md#loading-extensions).
 
 The `druid-parquet-extensions` provides the [Parquet input format](../../ingestion/data-formats.md#parquet), the [Parquet Hadoop parser](../../ingestion/data-formats.md#parquet-hadoop-parser),
 and the [Parquet Avro Hadoop Parser](../../ingestion/data-formats.md#parquet-avro-hadoop-parser) with `druid-avro-extensions`.
diff --git a/docs/development/extensions-core/postgresql.md b/docs/development/extensions-core/postgresql.md
index 07e17d1f29..cd88b22a43 100644
--- a/docs/development/extensions-core/postgresql.md
+++ b/docs/development/extensions-core/postgresql.md
@@ -23,7 +23,7 @@ title: "PostgreSQL Metadata Store"
   -->
 
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `postgresql-metadata-storage` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `postgresql-metadata-storage` in the extensions load list.
 
 ## Setting up PostgreSQL
 
@@ -87,7 +87,7 @@ In most cases, the configuration options map directly to the [postgres JDBC conn
 
 ### PostgreSQL Firehose
 
-The PostgreSQL extension provides an implementation of an [SQL input source](../../ingestion/native-batch-input-source.md) which can be used to ingest data into Druid from a PostgreSQL database.
+The PostgreSQL extension provides an implementation of an [SQL input source](../../ingestion/input-sources.md) which can be used to ingest data into Druid from a PostgreSQL database.
 
 ```json
 {
diff --git a/docs/development/extensions-core/protobuf.md b/docs/development/extensions-core/protobuf.md
index d6080eca94..3c87809f72 100644
--- a/docs/development/extensions-core/protobuf.md
+++ b/docs/development/extensions-core/protobuf.md
@@ -23,7 +23,7 @@ title: "Protobuf"
   -->
 
 
-This Apache Druid extension enables Druid to ingest and understand the Protobuf data format. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-protobuf-extensions` in the extensions load list.
+This Apache Druid extension enables Druid to ingest and understand the Protobuf data format. Make sure to [include](../../configuration/extensions.md#loading-extensions) `druid-protobuf-extensions` in the extensions load list.
 
 The `druid-protobuf-extensions` provides the [Protobuf Parser](../../ingestion/data-formats.md#protobuf-parser)
 for [stream ingestion](../../ingestion/index.md#streaming). See corresponding docs for details.
diff --git a/docs/development/extensions-core/s3.md b/docs/development/extensions-core/s3.md
index c8fa755dfb..20bd1682f2 100644
--- a/docs/development/extensions-core/s3.md
+++ b/docs/development/extensions-core/s3.md
@@ -28,11 +28,11 @@ This extension allows you to do 2 things:
 * [Ingest data](#reading-data-from-s3) from files stored in S3.
 * Write segments to [deep storage](#deep-storage) in S3.
 
-To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-s3-extensions` in the extensions load list.
+To use this Apache Druid extension, [include](../../configuration/extensions.md#loading-extensions) `druid-s3-extensions` in the extensions load list.
 
 ### Reading data from S3
 
-Use a native batch [Parallel task](../../ingestion/native-batch.md) with an [S3 input source](../../ingestion/native-batch-input-source.md#s3-input-source) to read objects directly from S3.
+Use a native batch [Parallel task](../../ingestion/native-batch.md) with an [S3 input source](../../ingestion/input-sources.md#s3-input-source) to read objects directly from S3.
 
 Alternatively, use a [Hadoop task](../../ingestion/hadoop.md),
 and specify S3 paths in your [`inputSpec`](../../ingestion/hadoop.md#inputspec).
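
As a minimal sketch of the S3 input source mentioned above, the `ioConfig` of a parallel task might look like this; the bucket and prefix are placeholders:

```json
"ioConfig": {
  "type": "index_parallel",
  "inputSource": {
    "type": "s3",
    "prefixes": ["s3://example-bucket/path/to/data/"]
  },
  "inputFormat": { "type": "json" }
}
```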
@@ -79,9 +79,9 @@ The configuration options are listed in order of precedence.  For example, if yo
 
 For more information, refer to the [Amazon Developer Guide](https://docs.aws.amazon.com/fr_fr/sdk-for-java/v1/developer-guide/credentials).
 
-Alternatively, you can bypass this chain by specifying an access key and secret key using a [Properties Object](../../ingestion/native-batch-input-source.md#s3-input-source) inside your ingestion specification.
+Alternatively, you can bypass this chain by specifying an access key and secret key using a [Properties Object](../../ingestion/input-sources.md#s3-input-source) inside your ingestion specification.
 
-Use the property [`druid.startup.logging.maskProperties`](../../configuration/index.md#startup-logging) to mask credentials information in Druid logs.  For example, `["password", "secretKey", "awsSecretAccessKey"]`.
+Use the property [`druid.startup.logging.maskProperties`](../../configuration/index.md#startup-logging) to mask credentials information in Druid logs. For example, `["password", "secretKey", "awsSecretAccessKey"]`.
 
 ### S3 permissions settings
 
diff --git a/docs/development/extensions-core/stats.md b/docs/development/extensions-core/stats.md
index bae91e8b72..917d3dcdd9 100644
--- a/docs/development/extensions-core/stats.md
+++ b/docs/development/extensions-core/stats.md
@@ -23,7 +23,7 @@ title: "Stats aggregator"
   -->
 
 
-This Apache Druid extension includes stat-related aggregators, including variance and standard deviations, etc. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-stats` in the extensions load list.
+This Apache Druid extension includes stat-related aggregators, such as variance and standard deviation. Make sure to [include](../../configuration/extensions.md#loading-extensions) `druid-stats` in the extensions load list.
 
 ## Variance aggregator
 
diff --git a/docs/ingestion/data-formats.md b/docs/ingestion/data-formats.md
index 0bcef5777d..f3ac4d90bd 100644
--- a/docs/ingestion/data-formats.md
+++ b/docs/ingestion/data-formats.md
@@ -1,6 +1,7 @@
 ---
 id: data-formats
-title: "Data formats"
+title: Source input formats
+sidebar_label: Source input formats
 ---
 
 <!--
@@ -27,7 +28,7 @@ We welcome any contributions to new formats.
 
 This page lists all default and core extension data formats supported by Druid.
 For additional data formats supported with community extensions,
-please see our [community extensions list](../development/extensions.md#community-extensions).
+please see our [community extensions list](../configuration/extensions.md#community-extensions).
 
 ## Formatting data
 
@@ -690,7 +691,7 @@ and [Kinesis indexing service](../development/extensions-core/kinesis-ingestion.
 Consider using the [input format](#input-format) instead for these types of ingestion.
 
 This section lists all default and core extension parsers.
-For community extension parsers, please see our [community extensions list](../development/extensions.md#community-extensions).
+For community extension parsers, please see our [community extensions list](../configuration/extensions.md#community-extensions).
 
 ### String Parser
 
diff --git a/docs/ingestion/faq.md b/docs/ingestion/faq.md
index 1f0136ee06..b679c2eb15 100644
--- a/docs/ingestion/faq.md
+++ b/docs/ingestion/faq.md
@@ -33,7 +33,7 @@ If the number of ingested events seem correct, make sure your query is correctly
 
 ## Where do my Druid segments end up after ingestion?
 
-Depending on what `druid.storage.type` is set to, Druid will upload segments to some [Deep Storage](../dependencies/deep-storage.md). Local disk is used as the default deep storage.
+Depending on what `druid.storage.type` is set to, Druid will upload segments to some [Deep Storage](../design/deep-storage.md). Local disk is used as the default deep storage.
 
 ## My stream ingest is not handing segments off
 
@@ -51,21 +51,21 @@ Other common reasons that hand-off fails are as follows:
 
 ## How do I get HDFS to work?
 
-Make sure to include the `druid-hdfs-storage` and all the hadoop configuration, dependencies (that can be obtained by running command `hadoop classpath` on a machine where hadoop has been setup) in the classpath. And, provide necessary HDFS settings as described in [deep storage](../dependencies/deep-storage.md) .
+Make sure to include the `druid-hdfs-storage` extension and all the Hadoop configuration and dependencies (which you can obtain by running `hadoop classpath` on a machine where Hadoop is set up) in the classpath. Also provide the necessary HDFS settings as described in [deep storage](../design/deep-storage.md).
 
 ## How do I know when I can query Druid after submitting a batch ingestion task?
 
 You can verify if segments created by a recent ingestion task are loaded onto historicals and available for querying using the following workflow.
 1. Submit your ingestion task.
-2. Repeatedly poll the [Overlord's tasks API](../operations/api-reference.md#tasks) ( `/druid/indexer/v1/task/{taskId}/status`) until your task is shown to be successfully completed.
-3. Poll the [Segment Loading by Datasource API](../operations/api-reference.md#segment-loading-by-datasource) (`/druid/coordinator/v1/datasources/{dataSourceName}/loadstatus`) with 
+2. Repeatedly poll the [Overlord's tasks API](../api-reference/api-reference.md#tasks) (`/druid/indexer/v1/task/{taskId}/status`) until your task is shown to be successfully completed.
+3. Poll the [Segment Loading by Datasource API](../api-reference/api-reference.md#segment-loading-by-datasource) (`/druid/coordinator/v1/datasources/{dataSourceName}/loadstatus`) with 
 `forceMetadataRefresh=true` and `interval=<INTERVAL_OF_INGESTED_DATA>` once. 
 (Note: `forceMetadataRefresh=true` refreshes Coordinator's metadata cache of all datasources. This can be a heavy operation in terms of the load on the metadata store but is necessary to make sure that we verify all the latest segments' load status)
 If there are segments not yet loaded, continue to step 4, otherwise you can now query the data.
-4. Repeatedly poll the [Segment Loading by Datasource API](../operations/api-reference.md#segment-loading-by-datasource) (`/druid/coordinator/v1/datasources/{dataSourceName}/loadstatus`) with 
+4. Repeatedly poll the [Segment Loading by Datasource API](../api-reference/api-reference.md#segment-loading-by-datasource) (`/druid/coordinator/v1/datasources/{dataSourceName}/loadstatus`) with 
 `forceMetadataRefresh=false` and `interval=<INTERVAL_OF_INGESTED_DATA>`. 
 Continue polling until all segments are loaded. Once all segments are loaded you can now query the data. 
-Note that this workflow only guarantees that the segments are available at the time of the [Segment Loading by Datasource API](../operations/api-reference.md#segment-loading-by-datasource) call. Segments can still become missing because of historical process failures or any other reasons afterward.
+Note that this workflow only guarantees that the segments are available at the time of the [Segment Loading by Datasource API](../api-reference/api-reference.md#segment-loading-by-datasource) call. Segments can still become missing because of historical process failures or any other reasons afterward.
 
 ## I don't see my Druid segments on my Historical processes
 
diff --git a/docs/ingestion/hadoop.md b/docs/ingestion/hadoop.md
index f0a868984d..c75bccc857 100644
--- a/docs/ingestion/hadoop.md
+++ b/docs/ingestion/hadoop.md
@@ -28,7 +28,7 @@ instance of a Druid [Overlord](../design/overlord.md). Please refer to our [Hado
 comparisons between Hadoop-based, native batch (simple), and native batch (parallel) ingestion.
 
 To run a Hadoop-based ingestion task, write an ingestion spec as specified below. Then POST it to the
-[`/druid/indexer/v1/task`](../operations/api-reference.md#tasks) endpoint on the Overlord, or use the
+[`/druid/indexer/v1/task`](../api-reference/api-reference.md#tasks) endpoint on the Overlord, or use the
 `bin/post-index-task` script included with Druid.
 
 ## Tutorial
diff --git a/docs/ingestion/index.md b/docs/ingestion/index.md
index d152e75cd6..e483f4ad39 100644
--- a/docs/ingestion/index.md
+++ b/docs/ingestion/index.md
@@ -1,6 +1,7 @@
 ---
 id: index
-title: "Ingestion"
+title: Ingestion overview
+sidebar_label: Overview
 ---
 
 <!--
@@ -30,11 +31,11 @@ For most ingestion methods, the Druid [MiddleManager](../design/middlemanager.md
 [Indexer](../design/indexer.md) processes load your source data. The sole exception is Hadoop-based ingestion, which
 uses a Hadoop MapReduce job on YARN.
 
-During ingestion, Druid creates segments and stores them in [deep storage](../dependencies/deep-storage.md). Historical nodes load the segments into memory to respond to queries. For streaming ingestion, the Middle Managers and indexers can respond to queries in real-time with arriving data. See the [Storage design](../design/architecture.md#storage-design) section of the Druid design documentation for more information.
+During ingestion, Druid creates segments and stores them in [deep storage](../design/deep-storage.md). Historical nodes load the segments into memory to respond to queries. For streaming ingestion, the Middle Managers and indexers can respond to queries in real-time with arriving data. See the [Storage design](../design/architecture.md#storage-design) section of the Druid design documentation for more information.
 
 This topic introduces streaming and batch ingestion methods. The following topics describe ingestion concepts and information that apply to all [ingestion methods](#ingestion-methods):
 
-- [Druid data model](./data-model.md) introduces concepts of datasources, primary timestamp, dimensions, and metrics.
+- [Druid schema model](./schema-model.md) introduces concepts of datasources, primary timestamp, dimensions, and metrics.
 - [Data rollup](./rollup.md) describes rollup as a concept and provides suggestions to maximize the benefits of rollup.
 - [Partitioning](./partitioning.md) describes time chunk and secondary partitioning in Druid.
 - [Ingestion spec reference](./ingestion-spec.md) provides a reference for the configuration options in the ingestion spec.
@@ -68,13 +69,13 @@ runs for the duration of the job.
 | **Method** | [Native batch](./native-batch.md) | [SQL](../multi-stage-query/index.md) | [Hadoop-based](hadoop.md) |
 |---|-----|--------------|------------|
 | **Controller task type** | `index_parallel` | `query_controller` | `index_hadoop` |
-| **How you submit it** | Send an `index_parallel` spec to the [task API](../operations/api-reference.md#tasks). | Send an [INSERT](../multi-stage-query/concepts.md#insert) or [REPLACE](../multi-stage-query/concepts.md#replace) statement to the [SQL task API](../multi-stage-query/api.md#submit-a-query). | Send an `index_hadoop` spec to the [task API](../operations/api-reference.md#tasks). |
+| **How you submit it** | Send an `index_parallel` spec to the [task API](../api-reference/api-reference.md#tasks). | Send an [INSERT](../multi-stage-query/concepts.md#insert) or [REPLACE](../multi-stage-query/concepts.md#replace) statement to the [SQL task API](../api-reference/sql-ingestion-api.md#submit-a-query). | Send an `index_hadoop` spec to the [task API](../api-reference/api-reference.md#tasks). |
 | **Parallelism** | Using subtasks, if [`maxNumConcurrentSubTasks`](native-batch.md#tuningconfig) is greater than 1. | Using `query_worker` subtasks. | Using YARN. |
 | **Fault tolerance** | Workers automatically relaunched upon failure. Controller task failure leads to job failure. | Controller or worker task failure leads to job failure. | YARN containers automatically relaunched upon failure. Controller task failure leads to job failure. |
 | **Can append?** | Yes. | Yes (INSERT). | No. |
 | **Can overwrite?** | Yes. | Yes (REPLACE). | Yes. |
 | **External dependencies** | None. | None. | Hadoop cluster. |
-| **Input sources** | Any [`inputSource`](./native-batch-input-source.md). | Any [`inputSource`](./native-batch-input-source.md) (using [EXTERN](../multi-stage-query/concepts.md#extern)) or Druid datasource (using FROM). | Any Hadoop FileSystem or Druid datasource. |
+| **Input sources** | Any [`inputSource`](./input-sources.md). | Any [`inputSource`](./input-sources.md) (using [EXTERN](../multi-stage-query/concepts.md#extern)) or Druid datasource (using FROM). | Any Hadoop FileSystem or Druid datasource. |
 | **Input formats** | Any [`inputFormat`](./data-formats.md#input-format). | Any [`inputFormat`](./data-formats.md#input-format). | Any Hadoop InputFormat. |
 | **Secondary partitioning options** | Dynamic, hash-based, and range-based partitioning methods are available. See [partitionsSpec](./native-batch.md#partitionsspec) for details.| Range partitioning ([CLUSTERED BY](../multi-stage-query/concepts.md#clustering)). |  Hash-based or range-based partitioning via [`partitionsSpec`](hadoop.md#partitionsspec). |
 | **[Rollup modes](./rollup.md#perfect-rollup-vs-best-effort-rollup)** | Perfect if `forceGuaranteedRollup` = true in the [`tuningConfig`](native-batch.md#tuningconfig).  | Always perfect. | Always perfect. |
diff --git a/docs/ingestion/ingestion-spec.md b/docs/ingestion/ingestion-spec.md
index 126a40ca92..e5a2ee062d 100644
--- a/docs/ingestion/ingestion-spec.md
+++ b/docs/ingestion/ingestion-spec.md
@@ -1,7 +1,7 @@
 ---
 id: ingestion-spec
 title: Ingestion spec reference
-sidebar_label: Ingestion spec
+sidebar_label: Ingestion spec reference
 description: Reference for the configuration options in the ingestion spec.
 ---
 
@@ -157,7 +157,7 @@ The `dataSource` is located in `dataSchema` → `dataSource` and is simply the n
 ### `timestampSpec`
 
 The `timestampSpec` is located in `dataSchema` → `timestampSpec` and is responsible for
-configuring the [primary timestamp](./data-model.md#primary-timestamp). An example `timestampSpec` is:
+configuring the [primary timestamp](./schema-model.md#primary-timestamp). An example `timestampSpec` is:
 
 ```
 "timestampSpec": {
@@ -186,7 +186,7 @@ Treat `__time` as a millisecond timestamp: the number of milliseconds since Jan
 ### `dimensionsSpec`
 
 The `dimensionsSpec` is located in `dataSchema` → `dimensionsSpec` and is responsible for
-configuring [dimensions](./data-model.md#dimensions). 
+configuring [dimensions](./schema-model.md#dimensions).
 
 You can either manually specify the dimensions or take advantage of schema auto-discovery where you allow Druid to infer all or some of the schema for your data. This means that you don't have to explicitly specify your dimensions and their type. 
 
@@ -223,8 +223,8 @@ A `dimensionsSpec` can have the following components:
 |------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
 | `dimensions`           | A list of [dimension names or objects](#dimension-objects). You cannot include the same column in both `dimensions` and `dimensionExclusions`.<br /><br />If `dimensions` and `spatialDimensions` are both null or empty arrays, Druid treats all columns other than timestamp or metrics that do not appear in `dimensionExclusions` as String-typed dimension columns. See [inclusions and exclusions](#inclusions-and-exclusions) for details.<br /><br />As a best practice,  [...]
 | `dimensionExclusions`  | The names of dimensions to exclude from ingestion. Only names are supported here, not objects.<br /><br />This list is only used if the `dimensions` and `spatialDimensions` lists are both null or empty arrays; otherwise it is ignored. See [inclusions and exclusions](#inclusions-and-exclusions) below for details.                                                                                                                                                         [...]
-| `spatialDimensions`    | An array of [spatial dimensions](../development/geo.md).                                                                                                                                                                                                                                                                                                                                                                                                                          [...]
-| `includeAllDimensions` | Note that this field only applies to string-based schema discovery where Druid ingests dimensions it discovers as strings. This is different from schema auto-discovery where Druid infers the type for data. You can set `includeAllDimensions` to true to ingest both explicit dimensions in the `dimensions` field and other dimensions that the ingestion task discovers from input data. In this case, the explicit dimensions will appear first in the order that you speci [...]
+| `spatialDimensions`    | An array of [spatial dimensions](../querying/geo.md).                                                                                                                                                                                                                                                                                                                                                                                                                             [...]
+| `includeAllDimensions` | Note that this field only applies to string-based schema discovery where Druid ingests dimensions it discovers as strings. This is different from schema auto-discovery where Druid infers the type for data. You can set `includeAllDimensions` to true to ingest both explicit dimensions in the `dimensions` field and other dimensions that the ingestion task discovers from input data. In this case, the explicit dimensions will appear first in the order that you speci [...]
 | `useSchemaDiscovery` | Configure Druid to use schema auto-discovery to discover some or all of the dimensions and types for your data. For any dimensions that aren't a uniform type, Druid ingests them as JSON. You can use this for native batch or streaming ingestion.  | false  | 
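
For readers of the table above, a minimal manually specified `dimensionsSpec` sketch, with illustrative column names, might look like:

```json
"dimensionsSpec": {
  "dimensions": [
    "page",
    "userId",
    { "type": "long", "name": "bytesAdded" }
  ]
}
```

With schema auto-discovery you would instead set `useSchemaDiscovery` to `true` and omit some or all of the explicit dimension list.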
 
 
@@ -297,7 +297,7 @@ the following operations:
 3. Specifying which time chunks of segments should be created, for batch ingestion (via `intervals`).
 4. Specifying whether ingestion-time [rollup](./rollup.md) should be used or not (via `rollup`).
 
-Other than `rollup`, these operations are all based on the [primary timestamp](./data-model.md#primary-timestamp).
+Other than `rollup`, these operations are all based on the [primary timestamp](./schema-model.md#primary-timestamp).
 
 An example `granularitySpec` is:
 
@@ -367,7 +367,7 @@ Druid currently includes one kind of built-in transform, the expression transfor
 }
 ```
 
-The `expression` is a [Druid query expression](../misc/math-expr.md).
+The `expression` is a [Druid query expression](../querying/math-expr.md).
 
 > Conceptually, after input data records are read, Druid applies ingestion spec components in a particular order:
 > first [`flattenSpec`](data-formats.md#flattenspec) (if any), then [`timestampSpec`](#timestampspec), then [`transformSpec`](#transformspec),
@@ -397,8 +397,8 @@ For details about `inputFormat` and supported `parser` types, see the ["Data for
 
 For details about major components of the `parseSpec`, refer to their subsections:
 
-- [`timestampSpec`](#timestampspec), responsible for configuring the [primary timestamp](./data-model.md#primary-timestamp).
-- [`dimensionsSpec`](#dimensionsspec), responsible for configuring [dimensions](./data-model.md#dimensions).
+- [`timestampSpec`](#timestampspec), responsible for configuring the [primary timestamp](./schema-model.md#primary-timestamp).
+- [`dimensionsSpec`](#dimensionsspec), responsible for configuring [dimensions](./schema-model.md#dimensions).
 - [`flattenSpec`](#flattenspec), responsible for flattening nested data formats.
 
 An example `parser` is:
diff --git a/docs/ingestion/native-batch-input-source.md b/docs/ingestion/input-sources.md
similarity index 99%
rename from docs/ingestion/native-batch-input-source.md
rename to docs/ingestion/input-sources.md
index 624f8fab19..fe2d226b91 100644
--- a/docs/ingestion/native-batch-input-source.md
+++ b/docs/ingestion/input-sources.md
@@ -1,7 +1,7 @@
 ---
-id: native-batch-input-sources
-title: "Native batch input sources"
-sidebar_label: "Native batch: input sources"
+id: input-sources
+title: "Input sources"
+sidebar_label: "Input sources"
 ---
 
 <!--
diff --git a/docs/ingestion/native-batch-firehose.md b/docs/ingestion/native-batch-firehose.md
index ca848e725c..d1773b7503 100644
--- a/docs/ingestion/native-batch-firehose.md
+++ b/docs/ingestion/native-batch-firehose.md
@@ -1,6 +1,6 @@
 ---
 id: native-batch-firehose
-title: "Native batch ingestion with firehose (Deprecated)"
+title: "JSON-based batch ingestion with firehose (Deprecated)"
 sidebar_label: "Firehose (deprecated)"
 ---
 
@@ -23,7 +23,7 @@ sidebar_label: "Firehose (deprecated)"
   ~ under the License.
   -->
 
-> Firehose ingestion is deprecated. See [Migrate from firehose to input source ingestion](./migrate-from-firehose-ingestion.md) for instructions on migrating from firehose ingestion to using native batch ingestion input sources.
+> Firehose ingestion is deprecated. See [Migrate from firehose to input source ingestion](../operations/migrate-from-firehose-ingestion.md) for instructions on migrating from firehose ingestion to using native batch ingestion input sources.
 
 There are several firehoses readily available in Druid; some are meant for examples, while others can be used directly in a production environment.
 
diff --git a/docs/ingestion/native-batch-simple-task.md b/docs/ingestion/native-batch-simple-task.md
index a7c0ef2e4e..105fdb65cb 100644
--- a/docs/ingestion/native-batch-simple-task.md
+++ b/docs/ingestion/native-batch-simple-task.md
@@ -1,7 +1,7 @@
 ---
 id: native-batch-simple-task
-title: "Native batch simple task indexing"
-sidebar_label: "Native batch (simple)"
+title: "JSON-based batch simple task indexing"
+sidebar_label: "JSON-based batch (simple)"
 ---
 
 <!--
diff --git a/docs/ingestion/native-batch.md b/docs/ingestion/native-batch.md
index cd2ef01e01..8a3d1e2988 100644
--- a/docs/ingestion/native-batch.md
+++ b/docs/ingestion/native-batch.md
@@ -1,7 +1,7 @@
 ---
 id: native-batch
-title: "Native batch ingestion"
-sidebar_label: "Native batch"
+title: JSON-based batch
+sidebar_label: JSON-based batch
 ---
 
 <!--
@@ -23,8 +23,7 @@ sidebar_label: "Native batch"
   ~ under the License.
   -->
 
-> This page describes native batch ingestion using [ingestion specs](ingestion-spec.md). Refer to the [ingestion
-> methods](../ingestion/index.md#batch) table to determine which ingestion method is right for you.
+> This page describes JSON-based batch ingestion using [ingestion specs](ingestion-spec.md). For SQL-based batch ingestion using the [`druid-multi-stage-query`](../multi-stage-query/index.md) extension, see [SQL-based ingestion](../multi-stage-query/index.md). Refer to the [ingestion methods](../ingestion/index.md#batch) table to determine which ingestion method is right for you.
 
 Apache Druid supports the following types of native batch indexing tasks:
 - Parallel task indexing (`index_parallel`) that can run multiple indexing tasks concurrently. Parallel task works well for production ingestion tasks.
@@ -35,14 +34,14 @@ This topic covers the configuration for `index_parallel` ingestion specs.
 For related information on batch indexing, see:
 - [Batch ingestion method comparison table](./index.md#batch) for a comparison of batch ingestion methods.
 - [Tutorial: Loading a file](../tutorials/tutorial-batch.md) for a tutorial on native batch ingestion.
-- [Input sources](./native-batch-input-source.md) for possible input sources.
-- [Input formats](./data-formats.md#input-format) for possible input formats.
+- [Input sources](./input-sources.md) for possible input sources.
+- [Source input formats](./data-formats.md#input-format) for possible input formats.
 
 ## Submit an indexing task
 
 To run either kind of native batch indexing task you can:
 - Use the **Load Data** UI in the web console to define and submit an ingestion spec.
-- Define an ingestion spec in JSON based upon the [examples](#parallel-indexing-example) and reference topics for batch indexing. Then POST the ingestion spec to the [Indexer API endpoint](../operations/api-reference.md#tasks), 
+- Define an ingestion spec in JSON based upon the [examples](#parallel-indexing-example) and reference topics for batch indexing. Then POST the ingestion spec to the [Indexer API endpoint](../api-reference/api-reference.md#tasks), 
 `/druid/indexer/v1/task`, on the Overlord service. Alternatively, you can use the indexing script included with Druid at `bin/post-index-task`.
 
 ## Parallel task indexing
@@ -196,7 +195,7 @@ The following table defines the primary sections of the input spec:
 |type|The task type. For parallel task indexing, set the value to `index_parallel`.|yes|
 |id|The task ID. If omitted, Druid generates the task ID using the task type, data source name, interval, and date-time stamp. |no|
 |spec|The ingestion spec that defines the [data schema](#dataschema), [IO config](#ioconfig), and [tuning config](#tuningconfig).|yes|
-|context|Context to specify various task configuration parameters. See [Task context parameters](tasks.md#context-parameters) for more details.|no|
+|context|Context to specify various task configuration parameters. See [Task context parameters](../ingestion/tasks.md#context-parameters) for more details.|no|
 
 ### `dataSchema`
 
@@ -263,7 +262,7 @@ The size-based split hint spec affects all splittable input sources except for t
 
 #### Segments Split Hint Spec
 
-The segments split hint spec is used only for [`DruidInputSource`](./native-batch-input-source.md).
+The segments split hint spec is used only for [`DruidInputSource`](./input-sources.md).
 
 |property|description|default|required?|
 |--------|-----------|-------|---------|
@@ -707,17 +706,17 @@ by assigning more task slots to them.
 Use the `inputSource` object to define the location where your ingestion task reads data. Only the native parallel task and simple task support the input source.
 
 For details on available input sources see:
-- [S3 input source](./native-batch-input-source.md#s3-input-source) (`s3`) reads data from AWS S3 storage.
-- [Google Cloud Storage input source](./native-batch-input-source.md#google-cloud-storage-input-source) (`gs`) reads data from Google Cloud Storage.
-- [Azure input source](./native-batch-input-source.md#azure-input-source) (`azure`) reads data from Azure Blob Storage and Azure Data Lake.
-- [HDFS input source](./native-batch-input-source.md#hdfs-input-source) (`hdfs`) reads data from HDFS storage.
-- [HTTP input Source](./native-batch-input-source.md#http-input-source) (`http`) reads data from HTTP servers.
-- [Inline input Source](./native-batch-input-source.md#inline-input-source) reads data you paste into the web console.
-- [Local input Source](./native-batch-input-source.md#local-input-source) (`local`) reads data from local storage.
-- [Druid input Source](./native-batch-input-source.md#druid-input-source) (`druid`) reads data from a Druid datasource.
-- [SQL input Source](./native-batch-input-source.md#sql-input-source) (`sql`) reads data from a RDBMS source.
-
-For information on how to combine input sources, see [Combining input source](./native-batch-input-source.md#combining-input-source).
+- [S3 input source](./input-sources.md#s3-input-source) (`s3`) reads data from AWS S3 storage.
+- [Google Cloud Storage input source](./input-sources.md#google-cloud-storage-input-source) (`gs`) reads data from Google Cloud Storage.
+- [Azure input source](./input-sources.md#azure-input-source) (`azure`) reads data from Azure Blob Storage and Azure Data Lake.
+- [HDFS input source](./input-sources.md#hdfs-input-source) (`hdfs`) reads data from HDFS storage.
+- [HTTP input source](./input-sources.md#http-input-source) (`http`) reads data from HTTP servers.
+- [Inline input source](./input-sources.md#inline-input-source) reads data you paste into the web console.
+- [Local input source](./input-sources.md#local-input-source) (`local`) reads data from local storage.
+- [Druid input source](./input-sources.md#druid-input-source) (`druid`) reads data from a Druid datasource.
+- [SQL input source](./input-sources.md#sql-input-source) (`sql`) reads data from an RDBMS source.
+
+For information on how to combine input sources, see [Combining input source](./input-sources.md#combining-input-source).
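
As a hedged sketch of combining input sources, where the local path and URL are placeholders:

```json
"inputSource": {
  "type": "combining",
  "delegates": [
    { "type": "local", "baseDir": "/data/druid/input", "filter": "*.json" },
    { "type": "http", "uris": ["https://example.com/more-data.json"] }
  ]
}
```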
 
 ### `segmentWriteOutMediumFactory`
 
diff --git a/docs/ingestion/rollup.md b/docs/ingestion/rollup.md
index 08cdfba378..241ffba367 100644
--- a/docs/ingestion/rollup.md
+++ b/docs/ingestion/rollup.md
@@ -1,7 +1,7 @@
 ---
 id: rollup
 title: "Data rollup"
-sidebar_label: Data rollup
+sidebar_label: Rollup
 description: Introduces rollup as a concept. Provides suggestions to maximize the benefits of rollup. Differentiates between perfect and best-effort rollup.
 ---
 
@@ -26,7 +26,7 @@ description: Introduces rollup as a concept. Provides suggestions to maximize th
 
 Druid can roll up data at ingestion time to reduce the amount of raw data to store on disk. Rollup is a form of summarization or pre-aggregation. Rolling up data can dramatically reduce the size of data to be stored and reduce row counts by potentially orders of magnitude. As a trade-off for the efficiency of rollup, you lose the ability to query individual events.
 
-At ingestion time, you control rollup with the `rollup` setting in the [`granularitySpec`](./ingestion-spec.md#granularityspec). Rollup is enabled by default. This means Druid combines into a single row any rows that have identical [dimension](./data-model.md#dimensions) values and [timestamp](./data-model.md#primary-timestamp) values after [`queryGranularity`-based truncation](./ingestion-spec.md#granularityspec).
+At ingestion time, you control rollup with the `rollup` setting in the [`granularitySpec`](./ingestion-spec.md#granularityspec). Rollup is enabled by default. This means Druid combines into a single row any rows that have identical [dimension](./schema-model.md#dimensions) values and [timestamp](./schema-model.md#primary-timestamp) values after [`queryGranularity`-based truncation](./ingestion-spec.md#granularityspec).
 
 When you disable rollup, Druid loads each row as-is without doing any form of pre-aggregation. This mode is similar to databases that do not support a rollup feature. Set `rollup` to `false` if you want Druid to store each record as-is, without any rollup summarization.
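
As a minimal sketch, enabling rollup with hourly query granularity in the `granularitySpec` looks roughly like this; the granularity values are illustrative:

```json
"granularitySpec": {
  "segmentGranularity": "day",
  "queryGranularity": "hour",
  "rollup": true
}
```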
 
diff --git a/docs/ingestion/schema-design.md b/docs/ingestion/schema-design.md
index eaada3651b..6d385c7b60 100644
--- a/docs/ingestion/schema-design.md
+++ b/docs/ingestion/schema-design.md
@@ -24,17 +24,17 @@ title: "Schema design tips"
 
 ## Druid's data model
 
-For general information, check out the documentation on [Druid's data model](./data-model.md) on the main
+For general information, check out the documentation on [Druid schema model](./schema-model.md) on the main
 ingestion overview page. The rest of this page discusses tips for users coming from other kinds of systems, as well as
 general tips and common practices.
 
-* Druid data is stored in [datasources](./data-model.md), which are similar to tables in a traditional RDBMS.
+* Druid data is stored in [datasources](./schema-model.md), which are similar to tables in a traditional RDBMS.
 * Druid datasources can be ingested with or without [rollup](./rollup.md). With rollup enabled, Druid partially aggregates your data during ingestion, potentially reducing its row count, decreasing storage footprint, and improving query performance. With rollup disabled, Druid stores one row for each row in your input data, without any pre-aggregation.
 * Every row in Druid must have a timestamp. Data is always partitioned by time, and every query has a time filter. Query results can also be broken down by time buckets like minutes, hours, days, and so on.
 * All columns in Druid datasources, other than the timestamp column, are either dimensions or metrics. This follows the [standard naming convention](https://en.wikipedia.org/wiki/Online_analytical_processing#Overview_of_OLAP_systems) of OLAP data.
 * Typical production datasources have tens to hundreds of columns.
-* [Dimension columns](./data-model.md#dimensions) are stored as-is, so they can be filtered on, grouped by, or aggregated at query time. They are always single Strings, [arrays of Strings](../querying/multi-value-dimensions.md), single Longs, single Doubles or single Floats.
-* [Metric columns](./data-model.md#metrics) are stored [pre-aggregated](../querying/aggregations.md), so they can only be aggregated at query time (not filtered or grouped by). They are often stored as numbers (integers or floats) but can also be stored as complex objects like [HyperLogLog sketches or approximate quantile sketches](../querying/aggregations.md#approximate-aggregations). Metrics can be configured at ingestion time even when rollup is disabled, but are most useful when roll [...]
+* [Dimension columns](./schema-model.md#dimensions) are stored as-is, so they can be filtered on, grouped by, or aggregated at query time. They are always single Strings, [arrays of Strings](../querying/multi-value-dimensions.md), single Longs, single Doubles or single Floats.
+* [Metric columns](./schema-model.md#metrics) are stored [pre-aggregated](../querying/aggregations.md), so they can only be aggregated at query time (not filtered or grouped by). They are often stored as numbers (integers or floats) but can also be stored as complex objects like [HyperLogLog sketches or approximate quantile sketches](../querying/aggregations.md#approximate-aggregations). Metrics can be configured at ingestion time even when rollup is disabled, but are most useful when ro [...]
 
 ## If you're coming from a
 
@@ -188,11 +188,11 @@ Druid is able to rapidly identify and retrieve data corresponding to time ranges
 If your data has more than one timestamp, you can ingest the others as secondary timestamps. The best way to do this
 is to ingest them as [long-typed dimensions](./ingestion-spec.md#dimensionsspec) in milliseconds format.
 If necessary, you can get them into this format using a [`transformSpec`](./ingestion-spec.md#transformspec) and
-[expressions](../misc/math-expr.md) like `timestamp_parse`, which returns millisecond timestamps.
+[expressions](../querying/math-expr.md) like `timestamp_parse`, which returns millisecond timestamps.
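For example, a sketch of the `transformSpec` and `dimensionsSpec` fragments that parse a hypothetical ISO 8601 field named `shippedAt` into a long-typed millisecond dimension might look like this (`shippedAt` and `shippedAtMillis` are illustrative names, not part of any real spec):

```json
{
  "transformSpec": {
    "transforms": [
      {
        "type": "expression",
        "name": "shippedAtMillis",
        "expression": "timestamp_parse(\"shippedAt\")"
      }
    ]
  },
  "dimensionsSpec": {
    "dimensions": [
      { "type": "long", "name": "shippedAtMillis" }
    ]
  }
}
```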
 
 At query time, you can query secondary timestamps with [SQL time functions](../querying/sql-scalar.md#date-and-time-functions)
 like `MILLIS_TO_TIMESTAMP`, `TIME_FLOOR`, and others. If you're using native Druid queries, you can use
-[expressions](../misc/math-expr.md).
+[expressions](../querying/math-expr.md).
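For instance, a SQL API request body that buckets the hypothetical `shippedAtMillis` dimension from the sketch above by hour might look like the following (`my_datasource` is a placeholder name):

```json
{
  "query": "SELECT TIME_FLOOR(MILLIS_TO_TIMESTAMP(shippedAtMillis), 'PT1H') AS shipped_hour, COUNT(*) AS shipments FROM my_datasource GROUP BY 1"
}
```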
 
 ### Nested dimensions
 
diff --git a/docs/ingestion/data-model.md b/docs/ingestion/schema-model.md
similarity index 98%
rename from docs/ingestion/data-model.md
rename to docs/ingestion/schema-model.md
index 8a5a126a8d..9d7358001d 100644
--- a/docs/ingestion/data-model.md
+++ b/docs/ingestion/schema-model.md
@@ -1,7 +1,7 @@
 ---
-id: data-model
-title: "Druid data model"
-sidebar_label: Data model
+id: schema-model
+title: Druid schema model
+sidebar_label: Schema model
 description: Introduces concepts of datasources, primary timestamp, dimensions, and metrics.
 ---
 
diff --git a/docs/ingestion/tasks.md b/docs/ingestion/tasks.md
index 95e61f88dc..6f6c2c010a 100644
--- a/docs/ingestion/tasks.md
+++ b/docs/ingestion/tasks.md
@@ -1,6 +1,7 @@
 ---
 id: tasks
-title: "Task reference"
+title: Task reference
+sidebar_label: Task reference
 ---
 
 <!--
@@ -25,7 +26,7 @@ title: "Task reference"
 Tasks do all [ingestion](index.md)-related work in Druid.
 
 For batch ingestion, you will generally submit tasks directly to Druid using the
-[Task APIs](../operations/api-reference.md#tasks). For streaming ingestion, tasks are generally submitted for you by a
+[Task APIs](../api-reference/api-reference.md#tasks). For streaming ingestion, tasks are generally submitted for you by a
 supervisor.
 
 ## Task API
@@ -33,7 +34,7 @@ supervisor.
 Task APIs are available in two main places:
 
 - The [Overlord](../design/overlord.md) process offers HTTP APIs to submit tasks, cancel tasks, check their status,
-review logs and reports, and more. Refer to the [Tasks API reference page](../operations/api-reference.md#tasks) for a
+review logs and reports, and more. Refer to the [Tasks API reference page](../api-reference/api-reference.md#tasks) for a
 full list.
 - Druid SQL includes a [`sys.tasks`](../querying/sql-metadata-tables.md#tasks-table) table that provides information about currently
 running tasks. This table is read-only, and has a limited (but useful!) subset of the full information available through
@@ -45,7 +46,7 @@ the Overlord APIs.
 
 A report containing information about the number of rows ingested, and any parse exceptions that occurred is available for both completed tasks and running tasks.
 
-The reporting feature is supported by [native batch tasks](../ingestion/native-batch.md), the Hadoop batch task, and Kafka and Kinesis ingestion tasks.
+The reporting feature is supported by [native batch tasks](native-batch.md), the Hadoop batch task, and Kafka and Kinesis ingestion tasks.
 
 ### Completion report
 
@@ -176,7 +177,7 @@ the `rowStats` map contains information about row counts. There is one entry for
 - `processed`: Number of rows successfully ingested without parsing errors
 - `processedBytes`: Total number of uncompressed bytes processed by the task. This reports the total byte size of all rows i.e. even those that are included in `processedWithError`, `unparseable` or `thrownAway`.
 - `processedWithError`: Number of rows that were ingested, but contained a parsing error within one or more columns. This typically occurs where input rows have a parseable structure but invalid types for columns, such as passing in a non-numeric String value for a numeric column.
-- `thrownAway`: Number of rows skipped. This includes rows with timestamps that were outside of the ingestion task's defined time interval and rows that were filtered out with a [`transformSpec`](./ingestion-spec.md#transformspec), but doesn't include the rows skipped by explicit user configurations. For example, the rows skipped by `skipHeaderRows` or `hasHeaderRow` in the CSV format are not counted.
+- `thrownAway`: Number of rows skipped. This includes rows with timestamps that were outside of the ingestion task's defined time interval and rows that were filtered out with a [`transformSpec`](ingestion-spec.md#transformspec), but doesn't include the rows skipped by explicit user configurations. For example, the rows skipped by `skipHeaderRows` or `hasHeaderRow` in the CSV format are not counted.
 - `unparseable`: Number of rows that could not be parsed at all and were discarded. This tracks input rows without a parseable structure, such as passing in non-JSON data when using a JSON parser.
 
 The `errorMsg` field shows a message describing the error that caused a task to fail. It will be null if the task was successful.
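As a rough sketch (the phase name and counts are made up for illustration), the `rowStats` portion of a completion report might look like:

```json
{
  "rowStats": {
    "buildSegments": {
      "processed": 8920,
      "processedBytes": 1774382,
      "processedWithError": 4,
      "thrownAway": 12,
      "unparseable": 1
    }
  }
}
```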
@@ -185,7 +186,7 @@ The `errorMsg` field shows a message describing the error that caused a task to
 
 ### Row stats
 
-The [native batch task](./native-batch.md), the Hadoop batch task, and Kafka and Kinesis ingestion tasks support retrieval of row stats while the task is running.
+The [native batch task](native-batch.md), the Hadoop batch task, and Kafka and Kinesis ingestion tasks support retrieval of row stats while the task is running.
 
 The live report can be accessed with a GET to the following URL on a Peon running a task:
 
@@ -249,7 +250,7 @@ http://<middlemanager-host>:<worker-port>/druid/worker/v1/chat/<task-id>/unparse
 ```
 
 Note that this functionality is not supported by all task types. Currently, it is only supported by the
-non-parallel [native batch task](../ingestion/native-batch.md) (type `index`) and the tasks created by the Kafka
+non-parallel [native batch task](native-batch.md) (type `index`) and the tasks created by the Kafka
 and Kinesis indexing services.
 
 <a name="locks"></a>
@@ -396,28 +397,28 @@ The following parameters apply to all task types.
 
 ## Task logs
 
-Logs are created by ingestion tasks as they run.  You can configure Druid to push these into a repository for long-term storage after they complete.
+Logs are created by ingestion tasks as they run. You can configure Druid to push these into a repository for long-term storage after they complete.
 
-Once the task has been submitted to the Overlord it remains `WAITING` for locks to be acquired.  Worker slot allocation is then `PENDING` until the task can actually start executing.
+Once the task has been submitted to the Overlord it remains `WAITING` for locks to be acquired. Worker slot allocation is then `PENDING` until the task can actually start executing.
 
 The task then starts creating logs in a local directory of the middle manager (or indexer) in a `log` directory for the specific `taskId` at [`druid.worker.baseTaskDirs`](../configuration/index.md#middlemanager-configuration).
 
 When the task completes - whether it succeeds or fails - the middle manager (or indexer) will push the task log file into the location specified in [`druid.indexer.logs`](../configuration/index.md#task-logging).
 
-Task logs on the Druid web console are retrieved via an [API](../operations/api-reference.md#overlord) on the Overlord.  It automatically detects where the log file is, either in the middleManager / indexer or in long-term storage, and passes it back.
+Task logs on the Druid web console are retrieved via an [API](../api-reference/api-reference.md#overlord) on the Overlord. It automatically detects where the log file is, either in the middleManager / indexer or in long-term storage, and passes it back.
 
 If you don't see the log file in long-term storage, it means either:
 
 1. the middleManager / indexer failed to push the log file to deep storage or
 2. the task did not complete.
 
-You can check the middleManager / indexer logs locally to see if there was a push failure.  If there was not, check the Overlord's own process logs to see why the task failed before it started.
+You can check the middleManager / indexer logs locally to see if there was a push failure. If there was not, check the Overlord's own process logs to see why the task failed before it started.
 
 > If you are running the indexing service in remote mode, the task logs must be stored in S3, Azure Blob Store, Google Cloud Storage or HDFS.
 
 You can configure retention periods for logs in milliseconds by setting `druid.indexer.logs.kill` properties in [configuration](../configuration/index.md#task-logging).  The Overlord will then automatically manage task logs in log directories along with entries in task-related metadata storage tables.
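For example, a sketch of the relevant properties might look like the following (the retention value is illustrative and expressed in milliseconds):

```
druid.indexer.logs.kill.enabled=true
druid.indexer.logs.kill.durationToRetain=2592000000
```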
 
-> Automatic log file deletion typically works based on the log file's 'modified' timestamp in the back-end store.  Large clock skews between Druid processes and the long-term store might result in unintended behavior.
+> Automatic log file deletion typically works based on the log file's 'modified' timestamp in the back-end store. Large clock skews between Druid processes and the long-term store might result in unintended behavior.
 
 ## Configuring task storage sizes
 
diff --git a/docs/multi-stage-query/concepts.md b/docs/multi-stage-query/concepts.md
index 5eba824f2b..2d03161027 100644
--- a/docs/multi-stage-query/concepts.md
+++ b/docs/multi-stage-query/concepts.md
@@ -38,7 +38,7 @@ and at least one worker task. As an experimental feature, the MSQ task engine al
 batch tasks. The behavior and result format of plain SELECT (without INSERT or REPLACE) is subject to change.
 
 You can execute SQL statements using the MSQ task engine through the **Query** view in the [web
-console](../operations/web-console.md) or through the [`/druid/v2/sql/task` API](api.md).
+console](../operations/web-console.md) or through the [`/druid/v2/sql/task` API](../api-reference/sql-ingestion-api.md).
 
 For more details on how SQL queries are executed using the MSQ task engine, see [multi-stage query
 tasks](#multi-stage-query-tasks).
@@ -52,7 +52,7 @@ To support ingestion, additional SQL functionality is available through the MSQ
 ### Read external data with `EXTERN`
 
 Query tasks can access external data through the `EXTERN` function, using any native batch [input
-source](../ingestion/native-batch-input-source.md) and [input format](../ingestion/data-formats.md#input-format).
+source](../ingestion/input-sources.md) and [input format](../ingestion/data-formats.md#input-format).
 
 `EXTERN` can read multiple files in parallel across different worker tasks. However, `EXTERN` does not split individual
 files across multiple worker tasks. If you have a small number of very large input files, you can increase query
@@ -126,7 +126,7 @@ The `__time` column is used for [partitioning by time](#partitioning-by-time). I
 column in your `INSERT` statement. However, Druid still creates a `__time` column in your Druid table and sets all
 timestamps to 1970-01-01 00:00:00.
 
-For more information, see [Primary timestamp](../ingestion/data-model.md#primary-timestamp).
+For more information, see [Primary timestamp](../ingestion/schema-model.md#primary-timestamp).
 
 <a name="partitioning"></a>
 
@@ -215,7 +215,7 @@ For an example, see [INSERT with rollup example](examples.md#insert-with-rollup)
 
 ### Execution flow
 
-When you execute a SQL statement using the task endpoint [`/druid/v2/sql/task`](api.md#submit-a-query), the following
+When you execute a SQL statement using the task endpoint [`/druid/v2/sql/task`](../api-reference/sql-ingestion-api.md#submit-a-query), the following
 happens:
 
 1. The Broker plans your SQL query into a native query, as usual.
diff --git a/docs/multi-stage-query/index.md b/docs/multi-stage-query/index.md
index acb880f03c..291211650b 100644
--- a/docs/multi-stage-query/index.md
+++ b/docs/multi-stage-query/index.md
@@ -1,7 +1,7 @@
 ---
 id: index
 title: SQL-based ingestion
-sidebar_label: Overview
+sidebar_label: SQL-based ingestion
 description: Introduces multi-stage query architecture and its task engine
 ---
 
@@ -62,7 +62,7 @@ transformation: creating new tables based on queries of other tables.
 To add the extension to an existing cluster, add `druid-multi-stage-query` to `druid.extensions.loadList` in your
 `common.runtime.properties` file.
 
-For more information about how to load an extension, see [Loading extensions](../development/extensions.md#loading-extensions).
+For more information about how to load an extension, see [Loading extensions](../configuration/extensions.md#loading-extensions).
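For example, assuming a cluster that loads no other extensions, the relevant line in `common.runtime.properties` might look like this (append the entry to your existing list rather than replacing it):

```
druid.extensions.loadList=["druid-multi-stage-query"]
```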
 
 To use [EXTERN](reference.md#extern-function), you need READ permission on the resource named "EXTERNAL" of the resource type
 "EXTERNAL". If you encounter a 403 error when trying to use `EXTERN`, verify that you have the correct permissions.
diff --git a/docs/multi-stage-query/reference.md b/docs/multi-stage-query/reference.md
index 5186171359..e9c238f9ad 100644
--- a/docs/multi-stage-query/reference.md
+++ b/docs/multi-stage-query/reference.md
@@ -61,7 +61,7 @@ FROM TABLE(
 
 `EXTERN` consists of the following parts:
 
-1. Any [Druid input source](../ingestion/native-batch-input-source.md) as a JSON-encoded string.
+1. Any [Druid input source](../ingestion/input-sources.md) as a JSON-encoded string.
 2. Any [Druid input format](../ingestion/data-formats.md) as a JSON-encoded string.
 3. A row signature, as a JSON-encoded array of column descriptors. Each column descriptor must have a
    `name` and a `type`. The type can be `string`, `long`, `double`, or `float`. This row signature is
diff --git a/docs/multi-stage-query/security.md b/docs/multi-stage-query/security.md
index 6422542890..dcbb68f545 100644
--- a/docs/multi-stage-query/security.md
+++ b/docs/multi-stage-query/security.md
@@ -43,8 +43,7 @@ To submit a query:
 Once a query is submitted, it executes as a [`query_controller`](concepts.md#execution-flow) task. Query tasks that
 users submit to the MSQ task engine are Overlord tasks, so they follow the Overlord's security model. This means that
 users with access to the Overlord API can perform some actions even if they didn't submit the query, including
-retrieving status or canceling a query. For more information about the Overlord API and the task API, see [APIs for
-SQL-based ingestion](./api.md).
+retrieving status or canceling a query. For more information about the Overlord API and the task API, see [APIs for SQL-based ingestion](../api-reference/sql-ingestion-api.md).
 
 To interact with a query through the Overlord API, users need the following permissions:
 
diff --git a/docs/design/auth.md b/docs/operations/auth.md
similarity index 100%
rename from docs/design/auth.md
rename to docs/operations/auth.md
diff --git a/docs/operations/clean-metadata-store.md b/docs/operations/clean-metadata-store.md
index 64dd2aeb88..e81fa90eb2 100644
--- a/docs/operations/clean-metadata-store.md
+++ b/docs/operations/clean-metadata-store.md
@@ -24,7 +24,7 @@ description: "Defines a strategy to maintain Druid metadata store performance by
   ~ under the License.
   -->
 
-Apache Druid relies on [metadata storage](../dependencies/metadata-storage.md) to track information on data storage, operations, and system configuration.
+Apache Druid relies on [metadata storage](../design/metadata-storage.md) to track information on data storage, operations, and system configuration.
 The metadata store includes the following:
 
 - Segment records
@@ -230,5 +230,5 @@ druid.coordinator.kill.datasource.durationToRetain=P4D
 ## Learn more
 See the following topics for more information:
 - [Metadata management](../configuration/index.md#metadata-management) for metadata store configuration reference.
-- [Metadata storage](../dependencies/metadata-storage.md) for an overview of the metadata storage database.
+- [Metadata storage](../design/metadata-storage.md) for an overview of the metadata storage database.
 
diff --git a/docs/operations/getting-started.md b/docs/operations/getting-started.md
index 773ade2031..8509d6baa1 100644
--- a/docs/operations/getting-started.md
+++ b/docs/operations/getting-started.md
@@ -39,7 +39,7 @@ If you wish to jump straight to deploying Druid as a cluster, or if you have an
 
 The [configuration reference](../configuration/index.md) describes all of Druid's configuration properties.
 
-The [API reference](../operations/api-reference.md) describes the APIs available on each Druid process.
+The [API reference](../api-reference/api-reference.md) describes the APIs available on each Druid process.
 
 The [basic cluster tuning guide](../operations/basic-cluster-tuning.md) is an introductory guide for tuning your Druid cluster.
 
diff --git a/docs/ingestion/migrate-from-firehose-ingestion.md b/docs/operations/migrate-from-firehose-ingestion.md
similarity index 92%
rename from docs/ingestion/migrate-from-firehose-ingestion.md
rename to docs/operations/migrate-from-firehose-ingestion.md
index fa4d1ad527..f470324b7f 100644
--- a/docs/ingestion/migrate-from-firehose-ingestion.md
+++ b/docs/operations/migrate-from-firehose-ingestion.md
@@ -1,6 +1,6 @@
 ---
 id: migrate-from-firehose
-title: "Migrate from firehose to input source ingestion"
+title: "Migrate from firehose to input source ingestion (legacy)"
 sidebar_label: "Migrate from firehose"
 ---
 
@@ -43,11 +43,11 @@ If you're unable to use the console or you have problems with the console method
 
 ### Update your ingestion spec manually
 
-To update your ingestion spec manually, copy your existing spec into a new file. Refer to [Native batch ingestion with firehose (Deprecated)](./native-batch-firehose.md) for a description of firehose properties.
+To update your ingestion spec manually, copy your existing spec into a new file. Refer to [Native batch ingestion with firehose (Deprecated)](../ingestion/native-batch-firehose.md) for a description of firehose properties.
 
 Edit the new file as follows:
 
-1. In the `ioConfig` component, replace the `firehose` definition with an `inputSource` definition for your chosen input source. See [Native batch input sources](./native-batch-input-source.md) for details.
+1. In the `ioConfig` component, replace the `firehose` definition with an `inputSource` definition for your chosen input source. See [Native batch input sources](../ingestion/input-sources.md) for details.
 2. Move the `timeStampSpec` definition from `parser.parseSpec` to the `dataSchema` component.
 3. Move the `dimensionsSpec` definition from `parser.parseSpec` to the `dataSchema` component.
 4. Move the `format` definition from `parser.parseSpec` to an `inputFormat` definition in `ioConfig`.
@@ -204,6 +204,6 @@ The following example illustrates the result of migrating the [example firehose
 
 For more information, see the following pages:
 
-- [Ingestion](./index.md): Overview of the Druid ingestion process.
-- [Native batch ingestion](./native-batch.md): Description of the supported native batch indexing tasks.
-- [Ingestion spec reference](./ingestion-spec.md): Description of the components and properties in the ingestion spec.
+- [Ingestion](../ingestion/index.md): Overview of the Druid ingestion process.
+- [Native batch ingestion](../ingestion/native-batch.md): Description of the supported native batch indexing tasks.
+- [Ingestion spec reference](../ingestion/ingestion-spec.md): Description of the components and properties in the ingestion spec.
diff --git a/docs/operations/pull-deps.md b/docs/operations/pull-deps.md
index ab2d5546b4..2e375f925c 100644
--- a/docs/operations/pull-deps.md
+++ b/docs/operations/pull-deps.md
@@ -136,4 +136,4 @@ java -classpath "/my/druid/lib/*" org.apache.druid.cli.Main tools pull-deps --de
 
 > Please note to use the pull-deps tool you must know the Maven groupId, artifactId, and version of your extension.
 >
-> For Druid community extensions listed [here](../development/extensions.md), the groupId is "org.apache.druid.extensions.contrib" and the artifactId is the name of the extension.
+> For Druid community extensions listed [here](../configuration/extensions.md), the groupId is "org.apache.druid.extensions.contrib" and the artifactId is the name of the extension.
diff --git a/docs/operations/rule-configuration.md b/docs/operations/rule-configuration.md
index f527db19c1..9719c877cc 100644
--- a/docs/operations/rule-configuration.md
+++ b/docs/operations/rule-configuration.md
@@ -34,11 +34,11 @@ You can specify the data to retain or drop in the following ways:
 - Period: segment data specified as an offset from the present time.
 - Interval: a fixed time range.
 
-Retention rules are persistent: they remain in effect until you change them. Druid stores retention rules in its [metadata store](../dependencies/metadata-storage.md).
+Retention rules are persistent: they remain in effect until you change them. Druid stores retention rules in its [metadata store](../design/metadata-storage.md).
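As a minimal sketch (the period and replica counts are illustrative), a rule chain that keeps one month of data on the default tier and drops everything older might look like:

```json
[
  {
    "type": "loadByPeriod",
    "period": "P1M",
    "includeFuture": true,
    "tieredReplicants": { "_default_tier": 2 }
  },
  { "type": "dropForever" }
]
```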
 
 ## Set retention rules
 
-You can use the Druid [web console](./web-console.md) or the [Coordinator API](./api-reference.md#coordinator) to create and manage retention rules.
+You can use the Druid [web console](./web-console.md) or the [Coordinator API](../api-reference/api-reference.md#coordinator) to create and manage retention rules.
 
 ### Use the web console
 
diff --git a/docs/operations/security-overview.md b/docs/operations/security-overview.md
index 5bfda4d6ee..2fa4b45f38 100644
--- a/docs/operations/security-overview.md
+++ b/docs/operations/security-overview.md
@@ -173,7 +173,7 @@ The following takes you through sample configuration steps for enabling basic au
 
 See the following topics for more information:
 
-* [Authentication and Authorization](../design/auth.md) for more information about the Authenticator,
+* [Authentication and Authorization](../operations/auth.md) for more information about the Authenticator,
 Escalator, and Authorizer.
 * [Basic Security](../development/extensions-core/druid-basic-security.md) for more information about
 the extension used in the examples above.
diff --git a/docs/operations/security-user-auth.md b/docs/operations/security-user-auth.md
index da87386f62..faefca1a7e 100644
--- a/docs/operations/security-user-auth.md
+++ b/docs/operations/security-user-auth.md
@@ -39,7 +39,7 @@ Druid uses the following resource types:
 * STATE &ndash; Cluster-wide state resources.
 * SYSTEM_TABLE &ndash; when the Broker property `druid.sql.planner.authorizeSystemTablesDirectly` is true, then Druid uses this resource type to authorize the system tables in the `sys` schema in SQL.
 
-For specific resources associated with the resource types, see [Defining permissions](#defining-permissions) and the corresponding endpoint descriptions in [API reference](./api-reference.md).
+For specific resources associated with the resource types, see [Defining permissions](#defining-permissions) and the corresponding endpoint descriptions in [API reference](../api-reference/api-reference.md).
 
 ### Actions
 
@@ -141,7 +141,7 @@ There is only one possible resource name for the "STATE" config resource type, "
 Resource names for this type are system schema table names in the `sys` schema in SQL, for example `sys.segments` and `sys.server_segments`. Druid only enforces authorization for `SYSTEM_TABLE` resources when the Broker property `druid.sql.planner.authorizeSystemTablesDirectly` is true.
 ### HTTP methods
 
-For information on what HTTP methods are supported on a particular request endpoint, refer to [API reference](./api-reference.md).
+For information on what HTTP methods are supported on a particular request endpoint, refer to [API reference](../api-reference/api-reference.md).
 
 `GET` requests require READ permissions, while `POST` and `DELETE` requests require WRITE permissions.
 
diff --git a/docs/operations/tls-support.md b/docs/operations/tls-support.md
index 7189af9f2f..b5db993eee 100644
--- a/docs/operations/tls-support.md
+++ b/docs/operations/tls-support.md
@@ -83,9 +83,9 @@ be configured with a proper [SSLContext](http://docs.oracle.com/javase/8/docs/ap
 to validate the Server Certificates, otherwise communication will fail.
 
 Since there are various ways to configure SSLContext, by default, Druid looks for an instance of SSLContext Guice binding
-while creating the HttpClient. This binding can be achieved writing a [Druid extension](../development/extensions.md)
+while creating the HttpClient. This binding can be achieved by writing a [Druid extension](../configuration/extensions.md)
 which can provide an instance of SSLContext. Druid comes with a simple extension present [here](../development/extensions-core/simple-client-sslcontext.md)
-which should be useful enough for most simple cases, see [this](../development/extensions.md#loading-extensions) for how to include extensions.
+which should be useful enough for most simple cases, see [this](../configuration/extensions.md#loading-extensions) for how to include extensions.
 If this extension does not satisfy the requirements then please follow the extension [implementation](https://github.com/apache/druid/tree/master/extensions-core/simple-client-sslcontext)
 to create your own extension.
 
diff --git a/docs/querying/caching.md b/docs/querying/caching.md
index e8f3fcaedf..26fe063e68 100644
--- a/docs/querying/caching.md
+++ b/docs/querying/caching.md
@@ -53,19 +53,19 @@ Druid invalidates any cache the moment any underlying data change to avoid retur
 
 The primary form of caching in Druid is a *per-segment results cache*.  This cache stores partial query results on a per-segment basis and is enabled on Historical services by default.
 
-The per-segment results cache allows Druid to maintain a low-eviction-rate cache for segments that do not change, especially important for those segments that [historical](../design/historical.md) processes pull into their local _segment cache_ from [deep storage](../dependencies/deep-storage.md). Real-time segments, on the other hand, continue to have results computed at query time.
+The per-segment results cache allows Druid to maintain a low-eviction-rate cache for segments that do not change, especially important for those segments that [historical](../design/historical.md) processes pull into their local _segment cache_ from [deep storage](../design/deep-storage.md). Real-time segments, on the other hand, continue to have results computed at query time.
 
 Druid may potentially merge per-segment cached results with the results of later queries that use a similar basic shape with similar filters, aggregations, etc. For example, if the query is identical except that it covers a different time period.
 
 Per-segment caching is controlled by the parameters `useCache` and `populateCache`.
 
-Use per-segment caching with real-time data. For example, your queries request data actively arriving from Kafka alongside intervals in segments that are loaded on Historicals.  Druid can merge cached results from Historical segments with real-time results from the stream.  [Whole-query caching](#whole-query-caching), on the other hand, is not helpful in this scenario because new data from real-time ingestion will continually invalidate the entire cached result.
+Use per-segment caching with real-time data. For example, your queries request data actively arriving from Kafka alongside intervals in segments that are loaded on Historicals. Druid can merge cached results from Historical segments with real-time results from the stream. [Whole-query caching](#whole-query-caching), on the other hand, is not helpful in this scenario because new data from real-time ingestion will continually invalidate the entire cached result.
 
 ### Whole-query caching
 
 With *whole-query caching*, Druid caches the entire results of individual queries, meaning the Broker no longer needs to merge per-segment results from data processes.
 
-Use *whole-query caching* on the Broker to increase query efficiency when there is little risk of ingestion invalidating the cache at a segment level.  This applies particularly, for example, when _not_ using real-time ingestion.  Perhaps your queries tend to use batch-ingested data, in which case per-segment caching would be less efficient since the underlying segments hardly ever change, yet Druid would continue to acquire per-segment results for each query.
+Use *whole-query caching* on the Broker to increase query efficiency when there is little risk of ingestion invalidating the cache at a segment level. This applies particularly, for example, when _not_ using real-time ingestion. Perhaps your queries tend to use batch-ingested data, in which case per-segment caching would be less efficient since the underlying segments hardly ever change, yet Druid would continue to acquire per-segment results for each query.
 
 ## Where to enable caching
 
@@ -79,7 +79,7 @@ Use *whole-query caching* on the Broker to increase query efficiency when there
 
 - On Brokers for small production clusters with less than five servers. 
 
-Avoid using per-segment cache at the Broker for large production clusters. When the Broker cache is enabled (`druid.broker.cache.populateCache` is `true`) and `populateCache` _is not_ `false` in the [query context](../querying/query-context.md), individual Historicals will _not_ merge individual segment-level results, and instead pass these back to the lead Broker.  The Broker must then carry out a large merge from _all_ segments on its own.
+Avoid using per-segment cache at the Broker for large production clusters. When the Broker cache is enabled (`druid.broker.cache.populateCache` is `true`) and `populateCache` _is not_ `false` in the [query context](../querying/query-context.md), individual Historicals will _not_ merge individual segment-level results, and instead pass these back to the lead Broker. The Broker must then carry out a large merge from _all_ segments on its own.
 
 **Whole-query cache** is available exclusively on Brokers.
 
diff --git a/docs/querying/datasource.md b/docs/querying/datasource.md
index 211f58bd8c..e348bc81c6 100644
--- a/docs/querying/datasource.md
+++ b/docs/querying/datasource.md
@@ -333,7 +333,7 @@ Native join datasources have the following properties. All are required.
 |`left`|Left-hand datasource. Must be of type `table`, `join`, `lookup`, `query`, or `inline`. Placing another join as the left datasource allows you to join arbitrarily many datasources.|
 |`right`|Right-hand datasource. Must be of type `lookup`, `query`, or `inline`. Note that this is more rigid than what Druid SQL requires.|
 |`rightPrefix`|String prefix that will be applied to all columns from the right-hand datasource, to prevent them from colliding with columns from the left-hand datasource. Can be any string, so long as it is nonempty and is not a prefix of the string `__time`. Any columns from the left-hand side that start with your `rightPrefix` will be shadowed. It is up to you to provide a prefix that will not shadow any important columns from the left side.|
-|`condition`|[Expression](../misc/math-expr.md) that must be an equality where one side is an expression of the left-hand side, and the other side is a simple column reference to the right-hand side. Note that this is more rigid than what Druid SQL requires: here, the right-hand reference must be a simple column reference; in SQL it can be an expression.|
+|`condition`|[Expression](math-expr.md) that must be an equality where one side is an expression of the left-hand side, and the other side is a simple column reference to the right-hand side. Note that this is more rigid than what Druid SQL requires: here, the right-hand reference must be a simple column reference; in SQL it can be an expression.|
 |`joinType`|`INNER` or `LEFT`.|
 
 #### Join performance
diff --git a/docs/querying/filters.md b/docs/querying/filters.md
index f243ebb411..82fdb81168 100644
--- a/docs/querying/filters.md
+++ b/docs/querying/filters.md
@@ -550,4 +550,4 @@ This filter allows for more flexibility, but it might be less performant than a
 }
 ```
 
-See the [Druid expression system](../misc/math-expr.md) for more details.
+See the [Druid expression system](math-expr.md) for more details.
diff --git a/docs/development/geo.md b/docs/querying/geo.md
similarity index 100%
rename from docs/development/geo.md
rename to docs/querying/geo.md
diff --git a/docs/misc/math-expr.md b/docs/querying/math-expr.md
similarity index 100%
rename from docs/misc/math-expr.md
rename to docs/querying/math-expr.md
diff --git a/docs/querying/nested-columns.md b/docs/querying/nested-columns.md
index d0809ad8c2..8f13372fdb 100644
--- a/docs/querying/nested-columns.md
+++ b/docs/querying/nested-columns.md
@@ -25,7 +25,7 @@ sidebar_label: Nested columns
 
 Apache Druid supports directly storing nested data structures in `COMPLEX<json>` columns. `COMPLEX<json>` columns store a copy of the structured data in JSON format and specialized internal columns and indexes for nested literal values&mdash;STRING, LONG, and DOUBLE types. An optimized [virtual column](./virtual-columns.md#nested-field-virtual-column) allows Druid to read and filter these values at speeds consistent with standard Druid LONG, DOUBLE, and STRING columns.
 
-Druid [SQL JSON functions](./sql-json-functions.md) allow you to extract, transform, and create `COMPLEX<json>` values in SQL queries, using the specialized virtual columns where appropriate. You can use the [JSON nested columns functions](../misc/math-expr.md#json-functions) in [native queries](./querying.md) using [expression virtual columns](./virtual-columns.md#expression-virtual-column), and in native ingestion with a [`transformSpec`](../ingestion/ingestion-spec.md#transformspec).
+Druid [SQL JSON functions](./sql-json-functions.md) allow you to extract, transform, and create `COMPLEX<json>` values in SQL queries, using the specialized virtual columns where appropriate. You can use the [JSON nested columns functions](math-expr.md#json-functions) in [native queries](./querying.md) using [expression virtual columns](./virtual-columns.md#expression-virtual-column), and in native ingestion with a [`transformSpec`](../ingestion/ingestion-spec.md#transformspec).
 
 You can use the JSON functions in INSERT and REPLACE statements in SQL-based ingestion, or in a `transformSpec` in native ingestion as an alternative to using a [`flattenSpec`](../ingestion/data-formats.md#flattenspec) object to "flatten" nested data for ingestion.
 
diff --git a/docs/querying/post-aggregations.md b/docs/querying/post-aggregations.md
index 935ca8fbce..e42b1d333f 100644
--- a/docs/querying/post-aggregations.md
+++ b/docs/querying/post-aggregations.md
@@ -92,7 +92,7 @@ The constant post-aggregator always returns the specified value.
 
 
 ### Expression post-aggregator
-The expression post-aggregator is defined using a Druid [expression](../misc/math-expr.md).
+The expression post-aggregator is defined using a Druid [expression](math-expr.md).
 
 ```json
 {
diff --git a/docs/querying/query-context.md b/docs/querying/query-context.md
index 0d6bd350ba..326753970f 100644
--- a/docs/querying/query-context.md
+++ b/docs/querying/query-context.md
@@ -26,7 +26,7 @@ sidebar_label: "Query context"
 The query context is used for various query configuration parameters. Query context parameters can be specified in
 the following ways:
 
-- For [Druid SQL](sql-api.md), context parameters are provided either in a JSON object named `context` to the
+- For [Druid SQL](../api-reference/sql-api.md), context parameters are provided either in a JSON object named `context` to the
 HTTP POST API, or as properties to the JDBC connection.
 - For [native queries](querying.md), context parameters are provided in a JSON object named `context`.
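For example, a SQL API request body that carries context parameters might look like the following (the query and the parameter choices are illustrative):

```json
{
  "query": "SELECT COUNT(*) FROM wikipedia",
  "context": {
    "sqlTimeZone": "America/Los_Angeles",
    "priority": 50
  }
}
```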
 
@@ -108,12 +108,12 @@ batches of rows at a time. Not all queries can be vectorized. In particular, vec
 requirements:
 
 - All query-level filters must either be able to run on bitmap indexes or must offer vectorized row-matchers. These
-include "selector", "bound", "in", "like", "regex", "search", "and", "or", and "not".
+include `selector`, `bound`, `in`, `like`, `regex`, `search`, `and`, `or`, and `not`.
 - All filters in filtered aggregators must offer vectorized row-matchers.
-- All aggregators must offer vectorized implementations. These include "count", "doubleSum", "floatSum", "longSum", "longMin",
- "longMax", "doubleMin", "doubleMax", "floatMin", "floatMax", "longAny", "doubleAny", "floatAny", "stringAny",
- "hyperUnique", "filtered", "approxHistogram", "approxHistogramFold", and "fixedBucketsHistogram" (with numerical input). 
-- All virtual columns must offer vectorized implementations. Currently for expression virtual columns, support for vectorization is decided on a per expression basis, depending on the type of input and the functions used by the expression. See the currently supported list in the [expression documentation](../misc/math-expr.md#vectorization-support).
+- All aggregators must offer vectorized implementations. These include `count`, `doubleSum`, `floatSum`, `longSum`, `longMin`,
+ `longMax`, `doubleMin`, `doubleMax`, `floatMin`, `floatMax`, `longAny`, `doubleAny`, `floatAny`, `stringAny`,
+ `hyperUnique`, `filtered`, `approxHistogram`, `approxHistogramFold`, and `fixedBucketsHistogram` (with numerical input). 
+- All virtual columns must offer vectorized implementations. Currently for expression virtual columns, support for vectorization is decided on a per expression basis, depending on the type of input and the functions used by the expression. See the currently supported list in the [expression documentation](math-expr.md#vectorization-support).
 - For GroupBy: All dimension specs must be "default" (no extraction functions or filtered dimension specs).
 - For GroupBy: No multi-value dimensions.
 - For Timeseries: No "descending" order.
diff --git a/docs/querying/querying.md b/docs/querying/querying.md
index 14885267d1..e957e7a527 100644
--- a/docs/querying/querying.md
+++ b/docs/querying/querying.md
@@ -108,7 +108,7 @@ curl -X DELETE "http://host:port/druid/v2/abc123"
 
 ### Authentication and authorization failures
 
-For [secured](../design/auth.md) Druid clusters, query requests respond with an HTTP 401 response code in case of an authentication failure. For authorization failures, an HTTP 403 response code is returned. 
+For [secured](../operations/auth.md) Druid clusters, query requests respond with an HTTP 401 response code in case of an authentication failure. For authorization failures, an HTTP 403 response code is returned. 
 
 ### Query execution failures
 
diff --git a/docs/querying/sql-data-types.md b/docs/querying/sql-data-types.md
index 4e6286d032..a98fca4a85 100644
--- a/docs/querying/sql-data-types.md
+++ b/docs/querying/sql-data-types.md
@@ -158,7 +158,7 @@ runtime property controls Druid's boolean logic mode. For the most SQL compliant
 When `druid.expressions.useStrictBooleans = false` (the default mode), Druid uses two-valued logic.
 
 When `druid.expressions.useStrictBooleans = true`, Druid uses three-valued logic for
-[expressions](../misc/math-expr.md) evaluation, such as `expression` virtual columns or `expression` filters.
+[expressions](math-expr.md) evaluation, such as `expression` virtual columns or `expression` filters.
 However, even in this mode, Druid uses two-valued logic for filter types other than `expression`.
 
 ## Nested columns
diff --git a/docs/querying/sql-query-context.md b/docs/querying/sql-query-context.md
index caab4772ab..e469fa390a 100644
--- a/docs/querying/sql-query-context.md
+++ b/docs/querying/sql-query-context.md
@@ -41,12 +41,12 @@ Configure Druid SQL query planning using the parameters in the table below.
 |`useApproximateCountDistinct`|Whether to use an approximate cardinality algorithm for `COUNT(DISTINCT foo)`.|`druid.sql.planner.useApproximateCountDistinct` on the Broker (default: true)|
 |`useGroupingSetForExactDistinct`|Whether to use grouping sets to execute queries with multiple exact distinct aggregations.|`druid.sql.planner.useGroupingSetForExactDistinct` on the Broker (default: false)|
 |`useApproximateTopN`|Whether to use approximate [TopN queries](topnquery.md) when a SQL query could be expressed as such. If false, exact [GroupBy queries](groupbyquery.md) will be used instead.|`druid.sql.planner.useApproximateTopN` on the Broker (default: true)|
-|`enableTimeBoundaryPlanning`|If true, SQL queries will get converted to TimeBoundary queries wherever possible. TimeBoundary queries are very efficient for min-max calculation on __time column in a datasource |`druid.query.default.context.enableTimeBoundaryPlanning` on the Broker (default: false)|
+|`enableTimeBoundaryPlanning`|If true, SQL queries will get converted to TimeBoundary queries wherever possible. TimeBoundary queries are very efficient for min-max calculation on the `__time` column in a datasource.|`druid.query.default.context.enableTimeBoundaryPlanning` on the Broker (default: false)|
 |`useNativeQueryExplain`|If true, `EXPLAIN PLAN FOR` will return the explain plan as a JSON representation of equivalent native query(s), else it will return the original version of explain plan generated by Calcite.<br /><br />This property is provided for backwards compatibility. It is not recommended to use this parameter unless you were depending on the older behavior.|`druid.sql.planner.useNativeQueryExplain` on the Broker (default: true)|
 |`sqlFinalizeOuterSketches`|If false (default behavior in Druid 25.0.0 and later), `DS_HLL`, `DS_THETA`, and `DS_QUANTILES_SKETCH` return sketches in query results, as documented. If true (default behavior in Druid 24.0.1 and earlier), sketches from these functions are finalized when they appear in query results.<br /><br />This property is provided for backwards compatibility with behavior in Druid 24.0.1 and earlier. It is not recommended to use this parameter unless you were depending [...]
 
 ## Setting the query context
-The query context parameters can be specified as a "context" object in the [JSON API](sql-api.md) or as a [JDBC connection properties object](sql-jdbc.md).
+The query context parameters can be specified as a "context" object in the [JSON API](../api-reference/sql-api.md) or as a [JDBC connection properties object](../api-reference/sql-jdbc.md).
 See examples for each option below.
 
 ### Example using JSON API
diff --git a/docs/querying/sql-translation.md b/docs/querying/sql-translation.md
index 18a2886354..4b0b2d8fbc 100644
--- a/docs/querying/sql-translation.md
+++ b/docs/querying/sql-translation.md
@@ -375,7 +375,7 @@ Additionally, some Druid native query features are not supported by the SQL lang
 include:
 
 - [Inline datasources](datasource.md#inline).
-- [Spatial filters](../development/geo.md).
+- [Spatial filters](geo.md).
 - [Multi-value dimensions](sql-data-types.md#multi-value-strings) are only partially implemented in Druid SQL. There are known
 inconsistencies between their behavior in SQL queries and in native queries due to how they are currently treated by
 the SQL planner.
diff --git a/docs/querying/sql.md b/docs/querying/sql.md
index 5888989612..c68ce28c84 100644
--- a/docs/querying/sql.md
+++ b/docs/querying/sql.md
@@ -26,7 +26,7 @@ sidebar_label: "Overview and syntax"
 > Apache Druid supports two query languages: Druid SQL and [native queries](querying.md).
 > This document describes the SQL language.
 
-You can query data in Druid datasources using [Druid SQL](./sql.md). Druid translates SQL queries into its [native query language](./querying.md). To learn about translation and how to get the best performance from Druid SQL, see [SQL query translation](./sql-translation.md).
+You can query data in Druid datasources using Druid SQL. Druid translates SQL queries into its [native query language](querying.md). To learn about translation and how to get the best performance from Druid SQL, see [SQL query translation](sql-translation.md).
 
 Druid SQL planning occurs on the Broker.
 Set [Broker runtime properties](../configuration/index.md#sql) to configure the query plan and JDBC querying.
@@ -42,8 +42,8 @@ For more information and SQL querying options see:
 - [Query translation](./sql-translation.md) for information about how Druid translates SQL queries to native queries before running them.
 
 For information about APIs, see:
-- [Druid SQL API](./sql-api.md) for information on the HTTP API.
-- [SQL JDBC driver API](./sql-jdbc.md) for information about the JDBC driver API.
+- [Druid SQL API](../api-reference/sql-api.md) for information on the HTTP API.
+- [SQL JDBC driver API](../api-reference/sql-jdbc.md) for information about the JDBC driver API.
 - [SQL query context](./sql-query-context.md) for information about the query context parameters that affect SQL planning.
 
 ## Syntax
@@ -270,7 +270,7 @@ written like `INTERVAL '1' HOUR`, `INTERVAL '1 02:03' DAY TO MINUTE`, `INTERVAL
 Druid SQL supports dynamic parameters using question mark (`?`) syntax, where parameters are bound to `?` placeholders
 at execution time. To use dynamic parameters, replace any literal in the query with a `?` character and provide a
 corresponding parameter value when you execute the query. Parameters are bound to the placeholders in the order in
-which they are passed. Parameters are supported in both the [HTTP POST](sql-api.md) and [JDBC](sql-jdbc.md) APIs.
+which they are passed. Parameters are supported in both the [HTTP POST](../api-reference/sql-api.md) and [JDBC](../api-reference/sql-jdbc.md) APIs.
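For example, an HTTP POST request body that uses a dynamic parameter against the quickstart `wikipedia` datasource might look like this (the filter value is illustrative):

```json
{
  "query": "SELECT COUNT(*) AS cnt FROM wikipedia WHERE channel = ?",
  "parameters": [
    { "type": "VARCHAR", "value": "#en.wikipedia" }
  ]
}
```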
 
 In certain cases, using dynamic parameters in expressions can cause type inference issues which cause your query to fail, for example:
 
diff --git a/docs/querying/using-caching.md b/docs/querying/using-caching.md
index d920b7bb06..12e8b5bbe2 100644
--- a/docs/querying/using-caching.md
+++ b/docs/querying/using-caching.md
@@ -83,7 +83,7 @@ As long as the service is set to populate the cache, you can set cache options f
   }
 }
 ```
+In this example, the user has set `populateCache` to `false` to avoid filling the result cache with results for segments that are over a year old. For more information, see [Druid SQL client APIs](../api-reference/sql-api.md).
+In this example the user has set `populateCache` to `false` to avoid filling the result cache with results for segments that are over a year old. For more information, see [Druid SQL client APIs](../api-reference/sql-api.md).
 
 
 
diff --git a/docs/querying/virtual-columns.md b/docs/querying/virtual-columns.md
index b5ccf80f42..6a7e8604c4 100644
--- a/docs/querying/virtual-columns.md
+++ b/docs/querying/virtual-columns.md
@@ -65,7 +65,7 @@ Each Apache Druid query can accept a list of virtual columns as a parameter. The
 
 ### Expression virtual column
 
-Expression virtual columns use Druid's native [expression](../misc/math-expr.md) system to allow defining query time
+Expression virtual columns use Druid's native [expression](math-expr.md) system to allow defining query time
 transforms of inputs from one or more columns.
 
 The expression virtual column has the following syntax:
@@ -83,7 +83,7 @@ The expression virtual column has the following syntax:
 |--------|-----------|---------|
 |type|Must be `"expression"` to indicate that this is an expression virtual column.|yes|
 |name|The name of the virtual column.|yes|
-|expression|An [expression](../misc/math-expr.md) that takes a row as input and outputs a value for the virtual column.|yes|
+|expression|An [expression](math-expr.md) that takes a row as input and outputs a value for the virtual column.|yes|
 |outputType|The expression's output will be coerced to this type. Can be LONG, FLOAT, DOUBLE, STRING, ARRAY types, or COMPLEX types.|no, default is FLOAT|
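For example, an expression virtual column that concatenates two hypothetical string columns, `firstName` and `lastName`, might look like:

```json
{
  "type": "expression",
  "name": "fullName",
  "expression": "concat(\"firstName\", ' ', \"lastName\")",
  "outputType": "STRING"
}
```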
 
 ### Nested field virtual column
diff --git a/docs/tutorials/cluster.md b/docs/tutorials/cluster.md
index aeb47dff27..83b9fc2c97 100644
--- a/docs/tutorials/cluster.md
+++ b/docs/tutorials/cluster.md
@@ -1,6 +1,7 @@
 ---
 id: cluster
-title: "Clustered deployment"
+title: Clustered deployment
+sidebar_label: Clustered deployment
 ---
 
 <!--
diff --git a/docs/tutorials/docker.md b/docs/tutorials/docker.md
index 5b9c2351a0..c77abda039 100644
--- a/docs/tutorials/docker.md
+++ b/docs/tutorials/docker.md
@@ -1,6 +1,7 @@
 ---
 id: docker
-title: "Tutorial: Run with Docker"
+title: Run with Docker
+sidebar_label: Run with Docker
 ---
 
 <!--
diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md
index 5b26161424..009545a8b4 100644
--- a/docs/tutorials/index.md
+++ b/docs/tutorials/index.md
@@ -1,6 +1,7 @@
 ---
 id: index
 title: "Quickstart (local)"
+sidebar_label: Quickstart (local)
 ---
 
 <!--
@@ -218,6 +219,6 @@ See the following topics for more information:
 * [Tutorial: Load files using SQL](./tutorial-msq-extern.md) to learn how to generate a SQL query that loads external data into a Druid datasource.
 * [Tutorial: Load data with native batch ingestion](tutorial-batch-native.md) to load and query data with Druid's native batch ingestion feature.
 * [Tutorial: Load stream data from Apache Kafka](./tutorial-kafka.md) to load streaming data from a Kafka topic.
-* [Extensions](../development/extensions.md) for details on Druid extensions.
+* [Extensions](../configuration/extensions.md) for details on Druid extensions.
 
 Remember that after stopping Druid services, you can start clean next time by deleting the `var` directory from the Druid root directory and running the `bin/start-druid` script again. You may want to do this before using other data ingestion tutorials, since they use the same Wikipedia datasource.
diff --git a/docs/tutorials/tutorial-batch-hadoop.md b/docs/tutorials/tutorial-batch-hadoop.md
index dad431acf5..065dc76c1e 100644
--- a/docs/tutorials/tutorial-batch-hadoop.md
+++ b/docs/tutorials/tutorial-batch-hadoop.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-batch-hadoop
-title: "Tutorial: Load batch data using Apache Hadoop"
-sidebar_label: "Load from Apache Hadoop"
+title: Load batch data using Apache Hadoop
+sidebar_label: Load from Apache Hadoop
 ---
 
 <!--
diff --git a/docs/tutorials/tutorial-batch-native.md b/docs/tutorials/tutorial-batch-native.md
index 83156290f3..2f1bd7c47c 100644
--- a/docs/tutorials/tutorial-batch-native.md
+++ b/docs/tutorials/tutorial-batch-native.md
@@ -1,6 +1,7 @@
 ---
 id: tutorial-batch-native
 title: "Load data with native batch ingestion"
+sidebar_label: Load data with native batch ingestion
 ---
 
 <!--
@@ -90,8 +91,8 @@ in the Druid root directory represents Wikipedia page edits for a given day.
    You do not need to adjust transformation or filtering settings, as applying ingestion time transforms and 
    filters are out of scope for this tutorial.
 
-8. The Configure schema settings are where you configure what [dimensions](../ingestion/data-model.md#dimensions) 
-   and [metrics](../ingestion/data-model.md#metrics) are ingested. The outcome of this configuration represents exactly how the 
+8. The Configure schema settings are where you configure what [dimensions](../ingestion/schema-model.md#dimensions) 
+   and [metrics](../ingestion/schema-model.md#metrics) are ingested. The outcome of this configuration represents exactly how the 
    data will appear in Druid after ingestion. 
 
    Since our dataset is very small, you can turn off [rollup](../ingestion/rollup.md) 
diff --git a/docs/tutorials/tutorial-batch.md b/docs/tutorials/tutorial-batch.md
index 12fdbfb8ef..cbbb563b1e 100644
--- a/docs/tutorials/tutorial-batch.md
+++ b/docs/tutorials/tutorial-batch.md
@@ -1,6 +1,6 @@
 ---
 id: tutorial-batch
-title: "Tutorial: Loading a file"
+title: "Load a file"
 sidebar_label: "Load files natively"
 ---
 
diff --git a/docs/tutorials/tutorial-compaction.md b/docs/tutorials/tutorial-compaction.md
index 4035de0ba9..51d67c331f 100644
--- a/docs/tutorials/tutorial-compaction.md
+++ b/docs/tutorials/tutorial-compaction.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-compaction
-title: "Tutorial: Compacting segments"
-sidebar_label: "Compacting segments"
+title: Compact segments
+sidebar_label: Compact segments
 ---
 
 <!--
diff --git a/docs/tutorials/tutorial-ingestion-spec.md b/docs/tutorials/tutorial-ingestion-spec.md
index d4360a8db1..5ac652fcbd 100644
--- a/docs/tutorials/tutorial-ingestion-spec.md
+++ b/docs/tutorials/tutorial-ingestion-spec.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-ingestion-spec
-title: "Tutorial: Writing an ingestion spec"
-sidebar_label: "Writing an ingestion spec"
+title: Write an ingestion spec
+sidebar_label: Write an ingestion spec
 ---
 
 <!--
diff --git a/docs/tutorials/tutorial-jdbc.md b/docs/tutorials/tutorial-jdbc.md
index 53b7cb86f7..e3788fab14 100644
--- a/docs/tutorials/tutorial-jdbc.md
+++ b/docs/tutorials/tutorial-jdbc.md
@@ -1,6 +1,6 @@
 ---
 id: tutorial-jdbc
-title: "Tutorial: Using the JDBC driver to query Druid"
+title: Use the JDBC driver to query Druid
 sidebar_label: JDBC connector
 ---
 
@@ -23,9 +23,9 @@ sidebar_label: JDBC connector
   ~ under the License.
   -->
 
-Redirecting you to the JDBC connector examples...
+Redirecting you to the JDBC driver API...
 <head>
-<script>window.location.replace("https://druid.apache.org/docs/latest/querying/sql-jdbc.html#examples")</script>
+<script>window.location.replace("https://druid.apache.org/docs/latest/api-reference/sql-jdbc.html")</script>
 </head>
-<a href="https://druid.apache.org/docs/latest/querying/sql-jdbc.html#examples">Click here if you are not redirected.</a>
+<a href="https://druid.apache.org/docs/latest/api-reference/sql-jdbc.html">Click here if you are not redirected.</a>
 
diff --git a/docs/tutorials/tutorial-jupyter-index.md b/docs/tutorials/tutorial-jupyter-index.md
index d7f401cae5..19382f9e4f 100644
--- a/docs/tutorials/tutorial-jupyter-index.md
+++ b/docs/tutorials/tutorial-jupyter-index.md
@@ -1,6 +1,7 @@
 ---
 id: tutorial-jupyter-index
-title: "Jupyter Notebook tutorials"
+title: Jupyter Notebook tutorials
+sidebar_label: Jupyter Notebook tutorials
 ---
 
 <!--
diff --git a/docs/tutorials/tutorial-kafka.md b/docs/tutorials/tutorial-kafka.md
index 18b906d297..3340f42c6e 100644
--- a/docs/tutorials/tutorial-kafka.md
+++ b/docs/tutorials/tutorial-kafka.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-kafka
-title: "Tutorial: Load streaming data from Apache Kafka"
-sidebar_label: "Load from Apache Kafka"
+title: Load streaming data from Apache Kafka
+sidebar_label: Load from Apache Kafka
 ---
 
 <!--
@@ -128,7 +128,7 @@ To use the console data loader:
 
    ![Data loader schema](../assets/tutorial-kafka-data-loader-05.png "Data loader schema")
 
-7. In the **Configure schema** step, you can select data types for the columns and configure [dimensions](../ingestion/data-model.md#dimensions) and [metrics](../ingestion/data-model.md#metrics) to ingest into Druid. The console does most of this for you, but you need to create JSON-type dimensions for the three nested columns in the data. 
+7. In the **Configure schema** step, you can select data types for the columns and configure [dimensions](../ingestion/schema-model.md#dimensions) and [metrics](../ingestion/schema-model.md#metrics) to ingest into Druid. The console does most of this for you, but you need to create JSON-type dimensions for the three nested columns in the data. 
 
     Click **Add dimension** and enter the following information. You can only add one dimension at a time.
     - Name: `event`, Type: `json`
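
The step above configures nested input columns as `json`-type dimensions. As a minimal sketch of what that selection corresponds to in an ingestion spec's `dimensionsSpec`, using only the `event` dimension named in the hunk (the flat column name below is hypothetical):

```python
# Minimal sketch: a dimensionsSpec fragment with a JSON-type dimension, matching
# the "Name: event, Type: json" selection described in the Configure schema step.
import json

dimensions_spec = {
    "dimensions": [
        {"type": "json", "name": "event"},   # nested column stored as a JSON dimension
        "some_flat_column",                  # hypothetical ordinary string dimension
    ]
}
print(json.dumps(dimensions_spec, indent=2))
```

The other two nested columns mentioned in the tutorial would be declared the same way.
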
diff --git a/docs/tutorials/tutorial-kerberos-hadoop.md b/docs/tutorials/tutorial-kerberos-hadoop.md
index 254e23facb..24fc290b6a 100644
--- a/docs/tutorials/tutorial-kerberos-hadoop.md
+++ b/docs/tutorials/tutorial-kerberos-hadoop.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-kerberos-hadoop
-title: "Configuring Apache Druid to use Kerberized Apache Hadoop as deep storage"
-sidebar_label: "Kerberized HDFS deep storage"
+title: Configure Apache Druid to use Kerberized Apache Hadoop as deep storage
+sidebar_label: Kerberized HDFS deep storage
 ---
 
 <!--
diff --git a/docs/tutorials/tutorial-msq-convert-spec.md b/docs/tutorials/tutorial-msq-convert-spec.md
index 4ccd0dc5fe..e989135740 100644
--- a/docs/tutorials/tutorial-msq-convert-spec.md
+++ b/docs/tutorials/tutorial-msq-convert-spec.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-msq-convert-spec
-title: "Tutorial: Convert an ingestion spec for SQL-based ingestion"
-sidebar_label: "Convert ingestion spec to SQL"
+title: Convert an ingestion spec for SQL-based ingestion
+sidebar_label: Convert ingestion spec to SQL
 description: How to convert an ingestion spec to a query for SQL-based ingestion in the web console.
 ---
 
diff --git a/docs/tutorials/tutorial-msq-extern.md b/docs/tutorials/tutorial-msq-extern.md
index 3293cefcc1..931c28d622 100644
--- a/docs/tutorials/tutorial-msq-extern.md
+++ b/docs/tutorials/tutorial-msq-extern.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-msq-extern
-title: "Tutorial: Load files with SQL-based ingestion"
-sidebar_label: "Load files using SQL πŸ†•"
+title: Load files with SQL-based ingestion
+sidebar_label: Load files using SQL
 description: How to generate a query that references externally hosted data
 ---
 
diff --git a/docs/tutorials/tutorial-query.md b/docs/tutorials/tutorial-query.md
index 212528e479..4769968b23 100644
--- a/docs/tutorials/tutorial-query.md
+++ b/docs/tutorials/tutorial-query.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-query
-title: "Tutorial: Querying data"
-sidebar_label: "Querying data"
+title: Query data
+sidebar_label: Query data
 ---
 
 <!--
@@ -30,9 +30,9 @@ It assumes that you've completed the [Quickstart](../tutorials/index.md)
 or one of the following tutorials, since we'll query datasources that you would have created
 by following one of them:
 
-* [Tutorial: Loading a file](../tutorials/tutorial-batch.md)
-* [Tutorial: Loading stream data from Kafka](../tutorials/tutorial-kafka.md)
-* [Tutorial: Loading a file using Hadoop](../tutorials/tutorial-batch-hadoop.md)
+* [Load a file](../tutorials/tutorial-batch.md)
+* [Load stream data from Kafka](../tutorials/tutorial-kafka.md)
+* [Load a file using Hadoop](../tutorials/tutorial-batch-hadoop.md)
 
 There are various ways to run Druid SQL queries: from the web console, using a command line utility
 and by posting the query by HTTP. We'll look at each of these. 
@@ -176,7 +176,7 @@ ORDER BY SUM(added) DESC
 ## Query SQL over HTTP
 
 
-You can submit native queries [over HTTP](../querying/sql-api.md#submit-a-query). The request body is a JSON object in which the `query` field contains the text of the query:
+You can submit native queries [directly to the Druid Broker over HTTP](../api-reference/sql-api.md#submit-a-query). The request body should be a JSON object, with the value for the key `query` containing the text of the query:
 
 ```json
 {
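
The hunk above is cut off just as the request body example begins. For orientation, here is a minimal sketch of posting such a body to the SQL API endpoint that the new link targets, assuming a local quickstart with the Router at `localhost:8888` and the `wikipedia` datasource from the earlier tutorials:

```python
# Minimal sketch: POST a Druid SQL query to /druid/v2/sql on a local quickstart.
import json
import urllib.request

body = json.dumps({
    "query": "SELECT page, COUNT(*) AS edits FROM wikipedia GROUP BY page ORDER BY edits DESC LIMIT 5"
}).encode("utf-8")

request = urllib.request.Request(
    "http://localhost:8888/druid/v2/sql",
    data=body,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(request) as response:
    print(json.loads(response.read()))
```
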
diff --git a/docs/tutorials/tutorial-retention.md b/docs/tutorials/tutorial-retention.md
index a44f3ca997..6beca6255b 100644
--- a/docs/tutorials/tutorial-retention.md
+++ b/docs/tutorials/tutorial-retention.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-retention
-title: "Tutorial: Configuring data retention"
-sidebar_label: "Configuring data retention"
+title: Configure data retention
+sidebar_label: Configure data retention
 ---
 
 <!--
@@ -29,7 +29,7 @@ This tutorial demonstrates how to configure retention rules on a datasource to s
 For this tutorial, we'll assume you've already downloaded Apache Druid as described in
 the [single-machine quickstart](index.md) and have it running on your local machine.
 
-It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md) and [Tutorial: Querying data](../tutorials/tutorial-query.md).
+It will also be helpful to have finished the [Load a file](../tutorials/tutorial-batch.md) and [Query data](../tutorials/tutorial-query.md) tutorials.
 
 ## Load the example data
 
diff --git a/docs/tutorials/tutorial-rollup.md b/docs/tutorials/tutorial-rollup.md
index 5081aa8f5e..b2c74d7e5b 100644
--- a/docs/tutorials/tutorial-rollup.md
+++ b/docs/tutorials/tutorial-rollup.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-rollup
-title: "Tutorial: Roll-up"
-sidebar_label: "Roll-up"
+title: Aggregate data with rollup
+sidebar_label: Aggregate data with rollup
 ---
 
 <!--
@@ -24,14 +24,14 @@ sidebar_label: "Roll-up"
   -->
 
 
-Apache Druid can summarize raw data at ingestion time using a process we refer to as "roll-up". Roll-up is a first-level aggregation operation over a selected set of columns that reduces the size of stored data.
+Apache Druid can summarize raw data at ingestion time using a process we refer to as "rollup". Rollup is a first-level aggregation operation over a selected set of columns that reduces the size of stored data.
 
-This tutorial will demonstrate the effects of roll-up on an example dataset.
+This tutorial will demonstrate the effects of rollup on an example dataset.
 
 For this tutorial, we'll assume you've already downloaded Druid as described in
 the [single-machine quickstart](index.md) and have it running on your local machine.
 
-It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md) and [Tutorial: Querying data](../tutorials/tutorial-query.md).
+It will also be helpful to have finished the [Load a file](../tutorials/tutorial-batch.md) and [Query data](../tutorials/tutorial-query.md) tutorials.
 
 ## Example data
 
@@ -105,7 +105,7 @@ We'll ingest this data using the following ingestion task spec, located at `quic
 }
 ```
 
-Roll-up has been enabled by setting `"rollup" : true` in the `granularitySpec`.
+Rollup has been enabled by setting `"rollup" : true` in the `granularitySpec`.
 
 Note that we have `srcIP` and `dstIP` defined as dimensions, a longSum metric is defined for the `packets` and `bytes` columns, and the `queryGranularity` has been defined as `minute`.
 
@@ -181,7 +181,7 @@ Likewise, these two events that occurred during `2018-01-01T01:02` have been rol
 └──────────────────────────┴────────┴───────┴─────────┴─────────┴─────────┘
 ```
 
-For the last event recording traffic between 1.1.1.1 and 2.2.2.2, no roll-up took place, because this was the only event that occurred during `2018-01-01T01:03`:
+For the last event recording traffic between 1.1.1.1 and 2.2.2.2, no rollup took place, because this was the only event that occurred during `2018-01-01T01:03`:
 
 ```json
 {"timestamp":"2018-01-01T01:03:29Z","srcIP":"1.1.1.1", "dstIP":"2.2.2.2","packets":49,"bytes":10204}
diff --git a/docs/tutorials/tutorial-sql-query-view.md b/docs/tutorials/tutorial-sql-query-view.md
index da47de684c..beeb08e15d 100644
--- a/docs/tutorials/tutorial-sql-query-view.md
+++ b/docs/tutorials/tutorial-sql-query-view.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-sql-query-view
-title: "Tutorial: Get to know Query view"
-sidebar_label: "Get to know Query view"
+title: Get to know Query view
+sidebar_label: Get to know Query view
 ---
 
 <!--
@@ -28,7 +28,7 @@ This tutorial demonstrates some useful features built into Query view in Apache
 
 Query view lets you run [Druid SQL queries](../querying/sql.md) and [native (JSON-based) queries](../querying/querying.md) against ingested data. Try out the [Introduction to Druid SQL](./tutorial-jupyter-index.md#tutorials) tutorial to learn more about Druid SQL.
 
-You can use Query view to test and tune queries before you use them in API requests&mdash;for example, to perform [SQL-based ingestion](../multi-stage-query/api.md). You can also ingest data directly in Query view.
+You can use Query view to test and tune queries before you use them in API requests&mdash;for example, to perform [SQL-based ingestion](../api-reference/sql-ingestion-api.md). You can also ingest data directly in Query view.
 
 The tutorial guides you through the steps to ingest sample data and query the ingested data using some Query view features.
 
diff --git a/docs/tutorials/tutorial-transform-spec.md b/docs/tutorials/tutorial-transform-spec.md
index cbcf1d7166..4ce320e3c0 100644
--- a/docs/tutorials/tutorial-transform-spec.md
+++ b/docs/tutorials/tutorial-transform-spec.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-transform-spec
-title: "Tutorial: Transforming input data"
-sidebar_label: "Transforming input data"
+title: Transform input data
+sidebar_label: Transform input data
 ---
 
 <!--
@@ -29,7 +29,7 @@ This tutorial will demonstrate how to use transform specs to filter and transfor
 For this tutorial, we'll assume you've already downloaded Apache Druid as described in
 the [single-machine quickstart](index.md) and have it running on your local machine.
 
-It will also be helpful to have finished [Tutorial: Loading a file](../tutorials/tutorial-batch.md) and [Tutorial: Querying data](../tutorials/tutorial-query.md).
+It will also be helpful to have finished the [Load a file](../tutorials/tutorial-batch.md) and [Query data](../tutorials/tutorial-query.md) tutorials.
 
 ## Sample data
 
diff --git a/docs/tutorials/tutorial-update-data.md b/docs/tutorials/tutorial-update-data.md
index a5d1553132..aa85a2aca7 100644
--- a/docs/tutorials/tutorial-update-data.md
+++ b/docs/tutorials/tutorial-update-data.md
@@ -1,7 +1,7 @@
 ---
 id: tutorial-update-data
-title: "Tutorial: Updating existing data"
-sidebar_label: "Updating existing data"
+title: Update existing data
+sidebar_label: Update existing data
 ---
 
 <!--
@@ -32,9 +32,9 @@ Before starting this tutorial, download and run Apache Druid on your local machi
 the [single-machine quickstart](index.md).
 
 You should also be familiar with the material in the following tutorials:
-* [Tutorial: Loading a file](../tutorials/tutorial-batch.md)
-* [Tutorial: Querying data](../tutorials/tutorial-query.md)
-* [Tutorial: Roll-up](../tutorials/tutorial-rollup.md)
+* [Load a file](../tutorials/tutorial-batch.md)
+* [Query data](../tutorials/tutorial-query.md)
+* [Aggregate data with rollup](../tutorials/tutorial-rollup.md)
 
 ## Load initial data
 
diff --git a/examples/quickstart/jupyter-notebooks/druidapi/druidapi/datasource.py b/examples/quickstart/jupyter-notebooks/druidapi/druidapi/datasource.py
index 4e0febaef4..8c9f364a35 100644
--- a/examples/quickstart/jupyter-notebooks/druidapi/druidapi/datasource.py
+++ b/examples/quickstart/jupyter-notebooks/druidapi/druidapi/datasource.py
@@ -30,7 +30,7 @@ class DatasourceClient:
     Client for datasource APIs. Prefer to use SQL to query the
     INFORMATION_SCHEMA to obtain information.
 
-    See https://druid.apache.org/docs/latest/operations/api-reference.html#datasources
+    See https://druid.apache.org/docs/latest/api-reference/api-reference.html#datasources
     '''
 
     def __init__(self, rest_client):
diff --git a/examples/quickstart/jupyter-notebooks/druidapi/druidapi/status.py b/examples/quickstart/jupyter-notebooks/druidapi/druidapi/status.py
index bf26db7e20..89141d2685 100644
--- a/examples/quickstart/jupyter-notebooks/druidapi/druidapi/status.py
+++ b/examples/quickstart/jupyter-notebooks/druidapi/druidapi/status.py
@@ -35,7 +35,7 @@ class StatusClient:
 
     You can find the service endpoints by querying the sys.servers table using SQL.
 
-    See https://druid.apache.org/docs/latest/operations/api-reference.html#process-information
+    See https://druid.apache.org/docs/latest/api-reference/api-reference.html#process-information
     '''
     
     def __init__(self, rest_client, owns_client=False):
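
Both druidapi docstrings above steer users toward SQL system tables rather than the per-service REST endpoints. A small sketch of that advice using only the standard library and the documented SQL API, assuming a Router at `localhost:8888`:

```python
# Sketch: query Druid system tables over the SQL API, as the druidapi docstrings
# recommend (INFORMATION_SCHEMA for datasource metadata, sys.servers for services).
import json
import urllib.request

def sql(query, url="http://localhost:8888/druid/v2/sql"):
    """POST a Druid SQL query and return the result rows."""
    request = urllib.request.Request(
        url,
        data=json.dumps({"query": query}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read())

print(sql("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'druid'"))
print(sql("SELECT server, server_type, host, plaintext_port FROM sys.servers"))
```
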
diff --git a/examples/quickstart/jupyter-notebooks/druidapi/druidapi/tasks.py b/examples/quickstart/jupyter-notebooks/druidapi/druidapi/tasks.py
index c1e5666519..0c428eda12 100644
--- a/examples/quickstart/jupyter-notebooks/druidapi/druidapi/tasks.py
+++ b/examples/quickstart/jupyter-notebooks/druidapi/druidapi/tasks.py
@@ -27,7 +27,7 @@ class TaskClient:
     '''
     Client for Overlord task-related APIs.
 
-    See https://druid.apache.org/docs/latest/operations/api-reference.html#tasks
+    See https://druid.apache.org/docs/latest/api-reference/api-reference.html#tasks
     '''
 
     def __init__(self, rest_client):
diff --git a/services/src/main/java/org/apache/druid/cli/CliCoordinator.java b/services/src/main/java/org/apache/druid/cli/CliCoordinator.java
index 85d2e41f54..7e40a816de 100644
--- a/services/src/main/java/org/apache/druid/cli/CliCoordinator.java
+++ b/services/src/main/java/org/apache/druid/cli/CliCoordinator.java
@@ -259,7 +259,7 @@ public class CliCoordinator extends ServerRunnable
                   "'druid.coordinator.merge.on' is not supported anymore. "
                   + "Please consider using Coordinator's automatic compaction instead. "
                   + "See https://druid.apache.org/docs/latest/operations/segment-optimization.html and "
-                  + "https://druid.apache.org/docs/latest/operations/api-reference.html#compaction-configuration "
+                  + "https://druid.apache.org/docs/latest/api-reference/api-reference.html#compaction-configuration "
                   + "for more details about compaction."
               );
             }
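
The warning above directs operators from the removed `druid.coordinator.merge.on` setting to automatic compaction. As a hedged sketch of enabling it through the Coordinator's compaction configuration endpoint (the datasource name, Coordinator address, and period value are assumptions for illustration):

```python
# Sketch: submit an automatic compaction config to the Coordinator.
import json
import urllib.request

config = {
    "dataSource": "wikipedia",          # hypothetical datasource
    "skipOffsetFromLatest": "P1D",      # leave the most recent day uncompacted
}
request = urllib.request.Request(
    "http://localhost:8081/druid/coordinator/v1/config/compaction",
    data=json.dumps(config).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(request) as response:
    print(response.status)
```
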
diff --git a/web-console/src/druid-models/input-source/input-source.tsx b/web-console/src/druid-models/input-source/input-source.tsx
index 344510300a..8c80bba801 100644
--- a/web-console/src/druid-models/input-source/input-source.tsx
+++ b/web-console/src/druid-models/input-source/input-source.tsx
@@ -77,7 +77,7 @@ export type InputSourceDesc =
       dataSource: string;
       interval: string;
       filter?: any;
-      dimensions?: string[]; // ToDo: these are not in the docs https://druid.apache.org/docs/latest/ingestion/native-batch-input-sources.html
+      dimensions?: string[]; // ToDo: these are not in the docs https://druid.apache.org/docs/latest/ingestion/input-sources.html
       metrics?: string[];
       maxInputSegmentBytesPerTask?: number;
     }
diff --git a/web-console/src/links.ts b/web-console/src/links.ts
index 33b58a8acc..c69393b942 100644
--- a/web-console/src/links.ts
+++ b/web-console/src/links.ts
@@ -82,7 +82,7 @@ export function getLink(linkName: LinkNames): string {
     case 'DOCS_RUNE':
       return `${links.docsHref}/querying/querying.html`;
     case 'DOCS_API':
-      return `${links.docsHref}/operations/api-reference.html`;
+      return `${links.docsHref}/api-reference/api-reference.html`;
     case 'DOCS_MSQ_ERROR':
       return `${links.docsHref}/multi-stage-query/reference.html`;
     case 'COMMUNITY':
diff --git a/web-console/src/views/sql-data-loader-view/schema-step/schema-step.tsx b/web-console/src/views/sql-data-loader-view/schema-step/schema-step.tsx
index 00b1e1ff2f..ad26869243 100644
--- a/web-console/src/views/sql-data-loader-view/schema-step/schema-step.tsx
+++ b/web-console/src/views/sql-data-loader-view/schema-step/schema-step.tsx
@@ -950,7 +950,7 @@ export const SchemaStep = function SchemaStep(props: SchemaStepProps) {
                 <AnchorButton
                   icon={IconNames.HELP}
                   text="Learn more..."
-                  href={`${getLink('DOCS')}/ingestion/data-model.html#primary-timestamp`}
+                  href={`${getLink('DOCS')}/ingestion/schema-model.html#primary-timestamp`}
                   target="_blank"
                   intent={Intent.WARNING}
                   minimal
diff --git a/website/.spelling b/website/.spelling
index 01b5f27ead..22b38f3bff 100644
--- a/website/.spelling
+++ b/website/.spelling
@@ -185,6 +185,7 @@ ParAccel
 ParseSpec
 ParseSpecs
 Protobuf
+protobuf
 pull-deps
 RDBMS
 RDDs
@@ -252,6 +253,7 @@ base64
 big-endian
 bigint
 blobstore
+Boolean
 boolean
 breakpoint
 broadcasted
@@ -496,6 +498,7 @@ symlink
 syntaxes
 tiering
 timeseries
+Timeseries
 timestamp
 timestamps
 to_json_string
@@ -549,6 +552,7 @@ whitespace
 wildcard
 wildcards
 xml
+XOR
 znode
 znodes
 APPROX_COUNT_DISTINCT
@@ -742,27 +746,20 @@ APPROX_COUNT_DISTINCT_DS_THETA
 APPROX_QUANTILE_DS
 DS_QUANTILES_SKETCH
 APPROX_QUANTILE_FIXED_BUCKETS
-
 # File specific overrides
- - ../docs/comparisons/druid-vs-elasticsearch.md
 100x
- - ../docs/configuration/logging.md
 _common
 appender
 appenders
- - ../docs/dependencies/deep-storage.md
 druid-hdfs-storage
 druid-s3-extensions
 druid.sql.planner.maxNumericInFilters
 Minio
 multi-server
- - ../docs/dependencies/metadata-storage.md
 BasicDataSource
- - ../docs/dependencies/zookeeper.md
 LeaderLatch
 3.5.x
 3.4.x
- - ../docs/design/auth.md
 AllowAll
 AuthenticationResult
 AuthorizationLoadingLookupTest
@@ -770,10 +767,8 @@ HttpClient
 allowAll
 authenticatorChain
 defaultUser
- - ../docs/design/coordinator.md
 inputSegmentSizeBytes
 skipOffsetFromLatest
- - ../docs/design/router.md
 brokerService
 c3.2xlarge
 defaultManualBrokerService
@@ -781,7 +776,6 @@ maxPriority
 minPriority
 runtime.properties
 timeBoundary
- - ../docs/design/segments.md
 0x0
 0x9
 2GB
@@ -792,7 +786,6 @@ Ke
 datasource_intervalStart_intervalEnd_version_partitionNum
 partitionNum
 v9
- - ../docs/development/build.md
 3.x
 8u92
 DskipTests
@@ -806,11 +799,9 @@ hadoop3
 hadoop2
 2.x.x
 3.x.x
- - ../docs/development/extensions-contrib/ambari-metrics-emitter.md
 ambari-metrics
 metricName
 trustStore
- - ../docs/development/extensions-core/azure.md
 StaticAzureBlobStoreFirehose
 StaticS3Firehose
 fetchTimeout
@@ -821,7 +812,6 @@ maxFetchRetry
 prefetchTriggerBytes
 shardSpecs
 sharedAccessStorageToken
- - ../docs/development/extensions-contrib/cloudfiles.md
 StaticCloudFilesFirehose
 cloudfiles
 rackspace-cloudfiles-uk
@@ -833,7 +823,6 @@ maxCacheCapacityBytes
 maxFetchCapacityBytes
 fetchTimeout
 maxFetchRetry
- - ../docs/development/extensions-contrib/distinctcount.md
 distinctCount
 groupBy
 maxIntermediateRows
@@ -842,20 +831,17 @@ queryGranularity
 segmentGranularity
 topN
 visitor_id
- - ../docs/development/extensions-contrib/influx.md
 cpu
 web_requests
- - ../docs/development/extensions-contrib/influxdb-emitter.md
 _
 druid_
 druid_cache_total
 druid_hits
 druid_query
 historical001
- - ../docs/development/extensions-contrib/materialized-view.md
 HadoopTuningConfig
 TuningConfig
-base-dataSource's
+base-dataSource
 baseDataSource
 baseDataSource-hashCode
 classpathPrefix
@@ -866,12 +852,10 @@ maxTaskCount
 metricsSpec
 queryType
 tuningConfig
- - ../docs/development/extensions-contrib/momentsketch-quantiles.md
 arcsinh
 fieldName
 momentSketchMerge
 momentsketch
- - ../docs/development/extensions-contrib/moving-average-query.md
 10-minutes
 MeanNoNulls
 P1D
@@ -895,31 +879,24 @@ movingAverage
 postAggregations
 postAveragers
 pull-deps
- - ../docs/development/extensions-contrib/opentsdb-emitter.md
 defaultMetrics.json
 namespacePrefix
 src
- - ../docs/development/extensions-contrib/redis-cache.md
 loadList
 pull-deps
 PT2S
- - ../docs/development/extensions-contrib/sqlserver.md
 com.microsoft.sqlserver.jdbc.SQLServerDriver
 sqljdbc
- - ../docs/development/extensions-contrib/statsd.md
 convertRange
-- ../docs/development/extensions-contrib/prometheus.md
 HTTPServer
 conversionFactor
 prometheus
 Pushgateway
 flushPeriod
- - ../docs/development/extensions-contrib/tdigestsketch-quantiles.md
 postAggregator
 quantileFromTDigestSketch
 quantilesFromTDigestSketch
 tDigestSketch
- - ../docs/development/extensions-contrib/thrift.md
 HadoopDruidIndexer
 LzoThriftBlock
 SequenceFile
@@ -931,10 +908,8 @@ ioConfig
 parseSpec
 thriftClass
 thriftJar
- - ../docs/development/extensions-contrib/time-min-max.md
 timeMax
 timeMin
- - ../docs/development/extensions-contrib/aliyun-oss-extensions.md
 Alibaba
 Aliyun
 aliyun-oss-extensions
@@ -946,7 +921,6 @@ OSS
 oss
 secretKey
 url
- - ../docs/development/extensions-core/approximate-histograms.md
 approxHistogram
 approxHistogramFold
 fixedBucketsHistogram
@@ -954,7 +928,6 @@ bucketNum
 lowerLimit
 numBuckets
 upperLimit
- - ../docs/development/extensions-core/avro.md
 AVRO-1124
 Avro-1124
 SchemaRepo
@@ -968,34 +941,28 @@ schemaRepository
 schema_inline
 subjectAndIdConverter
 url
- - ../docs/development/extensions-core/bloom-filter.md
 BloomKFilter
 bitset
 outputStream
- - ../docs/development/extensions-core/datasketches-hll.md
 HLLSketchBuild
 HLLSketchMerge
 lgK
 log2
 tgtHllType
- - ../docs/development/extensions-core/datasketches-quantiles.md
 CDF
 DoublesSketch
 maxStreamLength
 PMF
 quantilesDoublesSketch
 toString
- - ../docs/development/extensions-core/datasketches-theta.md
 isInputThetaSketch
 thetaSketch
 user_id
- - ../docs/development/extensions-core/datasketches-tuple.md
 ArrayOfDoublesSketch
 arrayOfDoublesSketch
 metricColumns
 nominalEntries
 numberOfValues
- - ../docs/development/extensions-core/druid-basic-security.md
 INFORMATION_SCHEMA
 MyBasicAuthenticator
 MyBasicAuthorizer
@@ -1016,14 +983,12 @@ objectClass
 initialAdminRole
 adminGroupMapping
 groupMappingName
- - ../docs/development/extensions-core/druid-kerberos.md
 8KiB
 HttpComponents
 MyKerberosAuthenticator
 RFC-4559
 SPNego
 _HOST
- - ../docs/development/extensions-core/druid-lookups.md
 cacheFactory
 concurrencyLevel
 dataFetcher
@@ -1037,32 +1002,26 @@ maximumSize
 onHeapPolling
 pollPeriod
 reverseLoadingCacheSpec
- - ../docs/development/extensions-core/druid-pac4j.md
 OAuth
 Okta
 OpenID
 pac4j
- - ../docs/development/extensions-core/kubernetes.md
 Env
 POD_NAME
 POD_NAMESPACE
 ConfigMap
 PT17S
- - ../docs/development/extensions-core/google.md
 GCS
 StaticGoogleBlobStoreFirehose
- - ../docs/development/extensions-core/hdfs.md
 gcs-connector
 hadoop2
 hdfs
- - ../docs/development/extensions-core/kafka-extraction-namespace.md
 Aotearoa
 Czechia
 KTable
 LookupExtractorFactory
 Zeelund
-zookeeper.connect
- - ../docs/development/extensions-core/kafka-ingestion.md
+zookeeper.connect 
 0.11.x.
 00Z
 2016-01-01T11
@@ -1103,7 +1062,6 @@ metricCompression
 numKafkaPartitions
 taskCount
 taskDuration
- - ../docs/development/extensions-core/kinesis-ingestion.md
 9.2dist
 KinesisSupervisorIOConfig
 KinesisSupervisorTuningConfig
@@ -1139,21 +1097,16 @@ KCL
 signalled
 ProvisionedThroughputExceededException
 Deaggregation
- - ../docs/development/extensions-core/lookups-cached-global.md
 baz
 customJson
 lookupParseSpec
 namespaceParseSpec
 simpleJson
- - ../docs/development/extensions-core/orc.md
 dimensionSpec
 flattenSpec
- - ../docs/development/extensions-core/parquet.md
 binaryAsString
- - ../docs/development/extensions-core/postgresql.md
-sslFactory's
+sslFactory
 sslMode
- - ../docs/development/extensions-core/protobuf.md
 Proto
 metrics.desc
 metrics.desc.
@@ -1162,7 +1115,6 @@ metrics_pb
 protoMessageType
 timeAndDims
 tmp
- - ../docs/development/extensions-core/s3.md
 SigV4
 jvm.config
 kms
@@ -1170,11 +1122,9 @@ s3
 s3a
 s3n
 uris
- - ../docs/development/extensions-core/simple-client-sslcontext.md
 KeyManager
 SSLContext
 TrustManager
- - ../docs/development/extensions-core/stats.md
 GenericUDAFVariance
 Golub
 J.L.
@@ -1190,7 +1140,6 @@ variance2
 varianceFold
 variance_pop
 variance_sample
- - ../docs/development/extensions-core/test-stats.md
 Berry_statbook
 Berry_statbook_chpt6.pdf
 S.E.
@@ -1210,7 +1159,6 @@ www.ucs.louisiana.edu
 zscore
 zscore2sample
 ztests
- - ../docs/development/extensions.md
 DistinctCount
 artifactId
 com.example
@@ -1233,16 +1181,13 @@ org.apache.druid.extensions.contrib.
 pull-deps
 sqlserver-metadata-storage
 statsd-emitter
- - ../docs/development/geo.md
 coords
 dimName
 maxCoords
 Mb
 minCoords
- - ../docs/development/javascript.md
 Metaspace
 dev
- - ../docs/development/modules.md
 AggregatorFactory
 ArchiveTask
 ComplexMetrics
@@ -1261,13 +1206,13 @@ ObjectMapper
 PasswordProvider
 PostAggregators
 QueryRunnerFactory
+segmentmetadataquery
 SegmentMetadataQuery
 SegmentMetadataQueryQueryToolChest
 StaticS3FirehoseFactory
 loadSpec
 multibind
 pom.xml
- - ../docs/ingestion/data-formats.md
 0.6.x
 0.7.x
 0.7.x.
@@ -1294,19 +1239,16 @@ timestampColumnName
 timestampSpec
 urls
 valueFormat
- - ../docs/data-management/compaction.md
 1GB
 IOConfig
 compactionTask
 compactionTasks
 ingestSegmentFirehose
 numShards
- - ../docs/ingestion/faq.md
 IngestSegment
 IngestSegmentFirehose
 maxSizes
 windowPeriod
- - ../docs/ingestion/hadoop.md
 2012-01-01T00
 2012-01-03T00
 2012-01-05T00
@@ -1363,7 +1305,6 @@ useNewAggs
 useYarnRMJobStatusFallback
 workingPath
 z.example.com
- - ../docs/ingestion/native-batch.md
 150MB
 CombiningFirehose
 DataSchema
@@ -1385,7 +1326,7 @@ chatHandlerTimeout
 cityName
 connectorConfig
 countryName
-dataSchema's
+dataSchema
 dropExisting
 foldCase
 forceGuaranteedRollup
@@ -1412,7 +1353,6 @@ totalNumMergeTasks
 StaticS3Firehose
 prefetchTriggerBytes
 awaitSegmentAvailabilityTimeoutMillis
- - ../docs/ingestion/native-batch-firehose.md
 LocalFirehose
 baseDir
 HttpFirehose
@@ -1429,14 +1369,11 @@ connectorConfig
 InlineFirehose
 CombiningFirehose
 httpAuthenticationPassword
- - ../docs/ingestion/native-batch-input-source.md
 accessKeyId
 secretAccessKey
 accessKeyId
 httpAuthenticationPassword
 countryName
- - ../docs/ingestion/native-batch-simple-task.md
-dataSchema's
 appendToExisting
 dropExisting
 timeChunk
@@ -1445,17 +1382,14 @@ forceGuaranteedRollup
 reportParseExceptions
 pushTimeout
 segmentWriteOutMediumFactory
- - ../docs/ingestion/schema-design.md
 product_category
 product_id
 product_name
- - ../docs/ingestion/tasks.md
 BUILD_SEGMENTS
 DETERMINE_PARTITIONS
 forceTimeChunkLock
 taskLockTimeout
 index.md
- - ../docs/misc/math-expr.md
 DOUBLE_ARRAY
 DOY
 DateTimeFormat
@@ -1471,6 +1405,7 @@ arr1
 arr2
 array_append
 array_concat
+ARRAY_CONCAT
 array_set_add
 array_set_add_all
 array_contains
@@ -1543,6 +1478,7 @@ str1
 str2
 string_to_array
 stringAny
+Strlen
 strlen
 strpos
 timestamp_ceil
@@ -1562,9 +1498,7 @@ IEC
 human_readable_binary_byte_format
 human_readable_decimal_byte_format
 human_readable_decimal_format
- - ../docs/misc/papers-and-talks.md
 RADStack
- - ../docs/operations/api-reference.md
 00.000Z
 2015-09-12T03
 2015-09-12T05
@@ -1585,7 +1519,7 @@ segmentId2
 taskId
 taskid
 un
- - ../docs/operations/basic-cluster-tuning.md
+ 
 100MiB
 128MiB
 15ms
@@ -1600,7 +1534,6 @@ un
 G1GC
 GroupBys
 QoS-type
- - ../docs/operations/dump-segment.md
 DumpSegment
 SegmentMetadata
 __time
@@ -1608,14 +1541,10 @@ bitmapSerdeFactory
 columnName
 index.zip
 time-iso8601
- - ../docs/operations/export-metadata.md
 hadoopStorageDirectory
- - ../docs/operations/insert-segment-to-db.md
 0.14.x
- - ../docs/operations/java.md
 G1
 Temurin
- - ../docs/operations/metrics.md
 0.14.x
 1s
 Bufferpool
@@ -1669,7 +1598,6 @@ taskType
 threadPoolNumBusyThreads.
 threadPoolNumIdleThreads
 threadPoolNumTotalThreads.
- - ../docs/operations/other-hadoop.md
 CDH
 Classloader
 assembly.sbt
@@ -1680,17 +1608,14 @@ mapred-default
 mapred-site
 sbt
 scala-2
- - ../docs/operations/pull-deps.md
 org.apache.hadoop
 proxy.com.
 remoteRepository
- - ../docs/operations/recommendations.md
 JBOD
 druid.processing.buffer.sizeBytes.
 druid.processing.numMergeBuffers
 druid.processing.numThreads
 tmpfs
- - ../docs/operations/rule-configuration.md
 broadcastByInterval
 broadcastByPeriod
 broadcastForever
@@ -1702,9 +1627,7 @@ dropForever
 loadByInterval
 loadByPeriod
 loadForever
- - ../docs/operations/segment-optimization.md
 700MB
- - ../docs/operations/single-server.md
 128GiB
 16GiB
 256GiB
@@ -1717,18 +1640,13 @@ i3.16xlarge
 i3.2xlarge
 i3.4xlarge
 i3.8xlarge
- - ../docs/operations/tls-support.md
 CN
 subjectAltNames
- - ../docs/querying/aggregations.md
 HyperUnique
 hyperUnique
 longSum
- - ../docs/querying/datasource.md
 groupBys
- - ../docs/querying/datasourcemetadataquery.md
 dataSourceMetadata
- - ../docs/querying/dimensionspecs.md
 ExtractionDimensionSpec
 SimpleDateFormat
 bar_1
@@ -1743,7 +1661,6 @@ timeFormat
 tz
 v3
 weekyears
- - ../docs/querying/filters.md
 ___bar
 caseSensitive
 extractionFn
@@ -1751,13 +1668,11 @@ insensitive_contains
 last_name
 lowerStrict
 upperStrict
- - ../docs/querying/granularities.md
 1970-01-01T00
 P2W
 PT0.750S
 PT1H30M
 TimeseriesQuery
- - ../docs/querying/groupbyquery.md
 D1
 D2
 D3
@@ -1776,30 +1691,24 @@ pushdown
 row1
 subtotalsSpec
 tradeoff
- - ../docs/querying/having.md
 HavingSpec
 HavingSpecs
 dimSelector
 equalTo
 greaterThan
 lessThan
- - ../docs/querying/hll-old.md
 DefaultDimensionSpec
 druid-hll
 isInputHyperUnique
- - ../docs/querying/joins.md
 pre-join
- - ../docs/querying/limitspec.md
 DefaultLimitSpec
 OrderByColumnSpec
 OrderByColumnSpecs
 dimensionOrder
- - ../docs/querying/lookups.md
 60_000
 kafka-extraction-namespace
 mins
-tierName
- - ../docs/querying/multi-value-dimensions.md
+tierName 
 row2
 row3
 row4
@@ -1808,14 +1717,11 @@ t4
 t5
 groupByEnableMultiValueUnnesting
 unnesting
- - ../docs/querying/multitenancy.md
 500ms
 tenant_id
- - ../docs/querying/post-aggregations.md
 fieldAccess
 finalizingFieldAccess
 hyperUniqueCardinality
- - ../docs/querying/query-context.md
 brokerService
 bySegment
 doubleSum
@@ -1854,14 +1760,12 @@ enableJoinFilterPushDown
 enableJoinFilterRewrite
 enableJoinFilterRewriteValueColumnFilters
 joinFilterRewriteMaxSize
- - ../docs/querying/querying.md
 7KiB
 DatasourceMetadata
 TimeBoundary
 errorClass
 errorMessage
 x-jackson-smile
- - ../docs/querying/scan-query.md
 batchSize
 compactedList
 druid.query.scan.legacy
@@ -1871,7 +1775,6 @@ maxRowsQueuedForOrdering
 maxSegmentPartitionsOrderedInMemory
 resultFormat
 valueVector
- - ../docs/querying/searchquery.md
 SearchQuerySpec
 cursorOnly
 druid.query.search.searchStrategy
@@ -1879,45 +1782,35 @@ queryableIndexSegment
 searchDimensions
 searchStrategy
 useIndexes
- - ../docs/querying/searchqueryspec.md
 ContainsSearchQuerySpec
 FragmentSearchQuerySpec
 InsensitiveContainsSearchQuerySpec
 RegexSearchQuerySpec
- - ../docs/querying/segmentmetadataquery.md
 analysisType
 analysisTypes
 lenientAggregatorMerge
 minmax
 segmentMetadata
 toInclude
- - ../docs/querying/select-query.md
 PagingSpec
 fromNext
 pagingSpec
- - ../docs/querying/sorting-orders.md
 BoundFilter
-GroupByQuery's
+GroupByQuery
 SearchQuery
 TopNMetricSpec
 compareTo
 file12
 file2
- - ../docs/querying/sql-operators.md
 _x_
- - ../docs/querying/timeseriesquery.md
 fieldName1
 fieldName2
- - ../docs/querying/topnmetricspec.md
 DimensionTopNMetricSpec
 metricSpec
 previousStop
- - ../docs/querying/topnquery.md
 GroupByQuery
 top500
- - ../docs/querying/virtual-columns.md
 outputType
- - ../docs/tutorials/cluster.md
 1.9TB
 16CPU
 WebUpd8
@@ -1925,18 +1818,18 @@ m5.2xlarge
 metadata.storage.
 256GiB
 128GiB
- - ../docs/tutorials/tutorial-batch-hadoop.md
+ 
 PATH_TO_DRUID
 namenode
- - ../docs/tutorials/tutorial-delete-data.md
+ 
 segmentID
 segmentIds
- - ../docs/tutorials/tutorial-ingestion-spec.md
+ 
 dstIP
 dstPort
 srcIP
 srcPort
- - ../docs/tutorials/tutorial-kerberos-hadoop.md
+ 
 common_runtime_properties
 druid.extensions.directory
 druid.extensions.loadList
@@ -1949,19 +1842,14 @@ druid.storage.type
 hdfs.headless.keytab
 indexing_log
 keytabs
- - ../docs/tutorials/tutorial-query.md
 dsql
- - ../docs/tutorials/tutorial-retention.md
 2015-09-12T12
- - ../docs/tutorials/tutorial-sketches-theta.md
 clickstreams
 uid
 _k_
 Bridgerton
 Hellmar
- - ../docs/tutorials/tutorial-update-data.md
 bear-111
- - ../docs/configuration/index.md
 10KiB
 2GiB
 512KiB
@@ -2156,9 +2044,7 @@ fillCapacityWithCategorySpec
 WorkerCategorySpec
 workerCategorySpec
 CategoryConfig
- - ../docs/design/index.md
 logsearch
- - ../docs/ingestion/index.md
 2000-01-01T01
 DateTimeFormat
 JsonPath
@@ -2171,7 +2057,6 @@ missingValue
 skipBytesInMemoryOverheadCheck
 spatialDimensions
 useFieldDiscovery
- - ../docs/tutorials/index.md
 4CPU
 cityName
 countryIsoCode
@@ -2186,7 +2071,6 @@ regionIsoCode
 regionName
 4GiB
 512GiB
- - ../docs/development/extensions-core/druid-ranger-security.md
 json
 metastore
 UserGroupInformation
@@ -2194,7 +2078,6 @@ CVE-2019-17571
 CVE-2019-12399
 CVE-2018-17196
 bin.tar.gz
- - ../docs/configuration/human-readable-byte.md
 0s
 1T
 3G
@@ -2223,7 +2106,6 @@ CDF
 maxStreamLength
 toString
 100TB
-- ../docs/development/extensions-contrib/compressed-big-decimal.md
 compressedBigDecimal
 limitSpec
 metricsSpec
@@ -2232,7 +2114,6 @@ SaleAmount
 IngestionSpec
 druid-compressed-bigdecimal
 doubleSum
- - ../docs/querying/sql-functions.md
 ANY_VALUE
 APPROX_COUNT_DISTINCT_DS_HLL
 APPROX_COUNT_DISTINCT_DS_THETA
@@ -2323,13 +2204,62 @@ KTable
 Aotearoa
 Czechia
 Zeelund
- - ../docs/tutorials/docker.md
- nano
- - ../docs/operations/python.md
+nano
 MacOS
 RHEL
 psutil
 pathlib
-- ../docs/tutorials/tutorial-sql-query-view.md
 kttm_simple
 dist_country
+# Extensions
+druid-avro-extensions
+druid-azure-extensions
+druid-basic-security
+druid-bloom-filter
+druid-datasketches
+druid-google-extensions
+druid-hdfs-storage
+druid-histogram
+druid-kafka-extraction-name
+druid-kafka-indexing-service
+druid-kinesis-indexing-service
+druid-kerberos
+druid-lookups-cached-global
+druid-lookups-cached-single
+druid-multi-stage-query
+druid-orc-extensions
+druid-parquet-extensions
+druid-protobuf-extensions
+druid-ranger-security
+druid-s3-extensions
+druid-ec2-extensions
+druid-aws-rds-extensions
+druid-stats
+mysql-metadata-storage
+postgresql-metadata-storage
+simple-client-sslcontext
+druid-pac4j
+druid-kubernetes-extensions
+aliyun-oss-extensions
+ambari-metrics-emitter
+druid-cassandra-storage
+druid-cloudfiles-extensions
+druid-compressed-bigdecimal
+druid-distinctcount
+druid-redis-cache
+druid-time-min-max
+sqlserver-metadata-storage
+graphite-emitter|Graphite metrics emitter
+statsd-emitter|StatsD metrics emitter
+kafka-emitter|Kafka metrics emitter
+druid-thrift-extensions
+druid-opentsdb-emitter
+materialized-view-selection
+materialized-view-maintenance
+druid-moving-average-query
+druid-influxdb-emitter
+druid-momentsketch
+druid-tdigestsketch
+gce-extensions
+prometheus-emitter
+kubernetes-overlord-extensions
\ No newline at end of file
diff --git a/website/redirects.json b/website/redirects.json
index 8a7a03d12c..17231a2cab 100644
--- a/website/redirects.json
+++ b/website/redirects.json
@@ -17,7 +17,7 @@
 {"source": "DataSource.html", "target": "querying/datasource.html"}
 {"source": "DataSourceMetadataQuery.html", "target": "querying/datasourcemetadataquery.html"}
 {"source": "Data_formats.html", "target": "ingestion/data-formats.html"}
-{"source": "Deep-Storage.html", "target": "dependencies/deep-storage.html"}
+{"source": "Deep-Storage.html", "target": "design/deep-storage.html"}
 {"source": "Design.html", "target": "design/index.html"}
 {"source": "DimensionSpecs.html", "target": "querying/dimensionspecs.html"}
 {"source": "Download.html", "target": "/downloads.html"}
@@ -33,7 +33,7 @@
 {"source": "Examples.html", "target": "tutorials/index.html"}
 {"source": "Filters.html", "target": "querying/filters.html"}
 {"source": "Firehose.html", "target": "ingestion/native-batch-firehose.html"}
-{"source": "GeographicQueries.html", "target": "development/geo.html"}
+{"source": "GeographicQueries.html", "target": "querying/geo.html"}
 {"source": "Granularities.html", "target": "querying/granularities.html"}
 {"source": "GroupByQuery.html", "target": "querying/groupbyquery.html"}
 {"source": "Hadoop-Configuration.html", "target": "ingestion/hadoop.html"}
@@ -41,7 +41,7 @@
 {"source": "Historical-Config.html", "target": "configuration/index.html#historical"}
 {"source": "Historical.html", "target": "design/historical.html"}
 {"source": "Home.html", "target": "design/index.html"}
-{"source": "Including-Extensions.html", "target": "development/extensions.html#loading-extensions"}
+{"source": "Including-Extensions.html", "target": "configuration/extensions.html#loading-extensions"}
 {"source": "Indexing-Service-Config.html", "target": "configuration/index.html#overlord"}
 {"source": "Indexing-Service.html", "target": "design/indexing-service.html"}
 {"source": "Ingestion-FAQ.html", "target": "ingestion/faq.html"}
@@ -54,7 +54,7 @@
 {"source": "Loading-Your-Data.html", "target": "ingestion/index.html"}
 {"source": "Logging.html", "target": "configuration/logging.html"}
 {"source": "Master.html", "target": "design/processes.html"}
-{"source": "Metadata-storage.html", "target": "dependencies/metadata-storage.html"}
+{"source": "Metadata-storage.html", "target": "design/metadata-storage.html"}
 {"source": "Metrics.html", "target": "operations/metrics.html"}
 {"source": "Middlemanager.html", "target": "design/middlemanager.html"}
 {"source": "Modules.html", "target": "development/modules.html"}
@@ -83,8 +83,8 @@
 {"source": "Segments.html", "target": "design/segments.html"}
 {"source": "SelectQuery.html", "target": "querying/select-query.html"}
 {"source": "Simple-Cluster-Configuration.html", "target": "tutorials/cluster.html"}
-{"source": "Spatial-Filters.html", "target": "development/geo.html"}
-{"source": "Spatial-Indexing.html", "target": "development/geo.html"}
+{"source": "Spatial-Filters.html", "target": "querying/geo.html"}
+{"source": "Spatial-Indexing.html", "target": "querying/geo.html"}
 {"source": "Stand-Alone-With-Riak-CS.html", "target": "design/index.html"}
 {"source": "Support.html", "target": "/community/"}
 {"source": "Tasks.html", "target": "ingestion/tasks.html"}
@@ -109,13 +109,13 @@
 {"source": "Tutorials.html", "target": "tutorials/index.html"}
 {"source": "Twitter-Tutorial.html", "target": "tutorials/index.html"}
 {"source": "Versioning.html", "target": "development/versioning.html"}
-{"source": "ZooKeeper.html", "target": "dependencies/zookeeper.html"}
+{"source": "ZooKeeper.html", "target": "design/zookeeper.html"}
 {"source": "alerts.html", "target": "operations/alerts.html"}
 {"source": "comparisons/druid-vs-cassandra.html", "target": "druid-vs-key-value.html"}
 {"source": "comparisons/druid-vs-hadoop.html", "target": "druid-vs-sql-on-hadoop.html"}
 {"source": "comparisons/druid-vs-impala-or-shark.html", "target": "druid-vs-sql-on-hadoop.html"}
 {"source": "comparisons/druid-vs-vertica.html", "target": "druid-vs-redshift.html"}
-{"source": "configuration/auth.html", "target": "../design/auth.html"}
+{"source": "configuration/auth.html", "target": "../operations/auth.html"}
 {"source": "configuration/broker.html", "target": "../configuration/index.html#broker"}
 {"source": "configuration/caching.html", "target": "../configuration/index.html#cache-configuration"}
 {"source": "configuration/coordinator.html", "target": "../configuration/index.html#coordinator"}
@@ -125,8 +125,12 @@
 {"source": "configuration/production-cluster.html", "target": "../tutorials/cluster.html"}
 {"source": "configuration/realtime.html", "target": "../ingestion/standalone-realtime.html"}
 {"source": "configuration/simple-cluster.html", "target": "../tutorials/cluster.html"}
-{"source": "configuration/zookeeper.html", "target": "../dependencies/zookeeper.html"}
+{"source": "configuration/zookeeper.html", "target": "../design/zookeeper.html"}
 {"source": "dependencies/cassandra-deep-storage.html", "target": "../development/extensions-contrib/cassandra.html"}
+{"source": "dependencies/deep-storage.html", "target": "../design/deep-storage.html"}
+{"source": "dependencies/metadata-storage.html", "target": "../design/metadata-storage.html"}
+{"source": "dependencies/zookeeper.html", "target": "../design/zookeeper.html"}
+{"source": "design/auth.html", "target": "../operations/auth.html"}
 {"source": "design/concepts-and-terminology.html", "target": "index.html"}
 {"source": "design/design.html", "target": "index.html"}
 {"source": "design/plumber.html", "target": "../ingestion/standalone-realtime.html"}
@@ -139,6 +143,7 @@
 {"source": "development/community-extensions/kafka-simple.html", "target": "../extensions-core/kafka-ingestion.html"}
 {"source": "development/community-extensions/rabbitmq.html", "target": "../extensions-core/kafka-ingestion.html"}
 {"source": "development/datasketches-aggregators.html", "target": "extensions-core/datasketches-extension.html"}
+{"source": "development/extensions.html", "target": "../configuration/extensions.html"}
 {"source": "development/extensions-contrib/kafka-simple.html", "target": "../../ingestion/standalone-realtime.html"}
 {"source": "development/extensions-contrib/orc.html", "target": "../extensions-core/orc.html"}
 {"source": "development/extensions-contrib/parquet.html", "target":"../../development/extensions-core/parquet.html"}
@@ -149,6 +154,9 @@
 {"source": "development/extensions-core/datasketches-aggregators.html", "target": "datasketches-extension.html"}
 {"source": "development/extensions-core/kafka-eight-firehose.html", "target": "../../ingestion/standalone-realtime.html"}
 {"source": "development/extensions-core/namespaced-lookup.html", "target": "lookups-cached-global.html"}
+{"source": "development/extensions-contrib/google.html", "target": "../extensions-core/google.html"}
+{"source": "development/geo.html", "target": "../querying/geo.html"}
+{"source": "development/integrating-druid-with-other-technologies.html", "target": "../ingestion/index.html"}
 {"source": "development/indexer.html", "target": "../design/indexer.html"}
 {"source": "development/kafka-simple-consumer-firehose.html", "target": "extensions-core/kafka-ingestion.html"}
 {"source": "development/libraries.html", "target": "/libraries.html"}
@@ -160,17 +168,20 @@
 {"source": "ingestion/command-line-hadoop-indexer.html", "target": "hadoop.html#cli"}
 {"source": "ingestion/compaction.html", "target": "../data-management/compaction.html"}
 {"source": "ingestion/data-management.html", "target": "../data-management/index.html"}
+{"source": "ingestion/data-model.html", "target": "../ingestion/schema-model.html"}
 {"source": "ingestion/delete-data.html", "target": "../data-management/delete.html"}
 {"source": "ingestion/firehose.html", "target": "native-batch-firehose.html"}
 {"source": "ingestion/flatten-json.html", "target": "ingestion-spec.html#flattenspec"}
 {"source": "ingestion/hadoop-vs-native-batch.html", "target": "index.html#batch"}
 {"source": "ingestion/ingestion.html", "target": "index.html"}
 {"source": "ingestion/locking-and-priority.html", "target": "tasks.html#locks"}
+{"source": "ingestion/migrate-from-firehose.html", "target": "../operations/migrate-from-firehose.html"}
 {"source": "ingestion/misc-tasks.html", "target": "tasks.html#all-task-types"}
 {"source": "ingestion/native_tasks.html", "target": "native-batch.html"}
+{"source": "ingestion/native-batch-input-sources.html", "target": "input-sources.html"}
 {"source": "ingestion/overview.html", "target": "index.html"}
 {"source": "ingestion/realtime-ingestion.html", "target": "index.html"}
-{"source": "ingestion/reports.html", "target": "tasks.html#reports"}
+{"source": "ingestion/reports.html", "target": "tasks.html#task-reports"}
 {"source": "ingestion/schema-changes.html", "target": "../design/segments.html#segments-with-different-schemas"}
 {"source": "ingestion/stream-ingestion.html", "target": "index.html#streaming"}
 {"source": "ingestion/stream-pull.html", "target": "../ingestion/standalone-realtime.html"}
@@ -179,12 +190,21 @@
 {"source": "ingestion/update-existing-data.html", "target": "../data-management/update.html"}
 {"source": "misc/cluster-setup.html", "target": "../tutorials/cluster.html"}
 {"source": "misc/evaluate.html", "target": "../tutorials/cluster.html"}
+{"source": "misc/math-expr.html", "target": "../querying/math-expr.html"}
 {"source": "misc/tasks.html", "target": "../ingestion/tasks.html"}
-{"source": "operations/including-extensions.html", "target": "../development/extensions.html"}
+{"source": "multi-stage-query/api.html", "target": "../api-reference/sql-ingestion-api.html"}
+{"source": "operations/api-reference.html", "target": "../api-reference/api-reference.html"}
+{"source": "operations/druid-console.html", "target": "web-console.html"}
+{"source": "operations/getting-started.html", "target": "../design/index.html"}
+{"source": "operations/including-extensions.html", "target": "../configuration/extensions.html"}
+{"source": "operations/management-uis.html", "target": "web-console.html"}
 {"source": "operations/multitenancy.html", "target": "../querying/multitenancy.html"}
+{"source": "operations/recommendations.html", "target": "basic-cluster-tuning.html"}
 {"source": "operations/performance-faq.html", "target": "../operations/basic-cluster-tuning.html"}
 {"source": "querying/optimizations.html", "target": "multi-value-dimensions.html"}
 {"source": "querying/searchqueryspec.html", "target": "searchquery.html"}
+{"source": "querying/sql-api.html", "target": "../api-reference/sql-api.html"}
+{"source": "querying/sql-jdbc.html", "target": "../api-reference/sql-jdbc.html"}
 {"source": "tutorials/booting-a-production-cluster.html", "target": "cluster.html"}
 {"source": "tutorials/examples.html", "target": "index.html"}
 {"source": "tutorials/firewall.html", "target": "cluster.html"}
@@ -197,9 +217,3 @@
 {"source": "tutorials/tutorial-loading-streaming-data.html", "target": "tutorial-kafka.html"}
 {"source": "tutorials/tutorial-the-druid-cluster.html", "target": "cluster.html"}
 {"source": "tutorials/tutorial-tranquility.html", "target": "../ingestion/tranquility.html"}
-{"source": "development/extensions-contrib/google.html", "target": "../extensions-core/google.html"}
-{"source": "development/integrating-druid-with-other-technologies.html", "target": "../ingestion/index.html"}
-{"source": "operations/druid-console.html", "target": "web-console.html"}
-{"source": "operations/getting-started.html", "target": "../design/index.html"}
-{"source": "operations/management-uis.html", "target": "web-console.html"}
-{"source": "operations/recommendations.html", "target": "basic-cluster-tuning.html"}
diff --git a/website/sidebars.json b/website/sidebars.json
index f1ab145c04..aea3a5c444 100644
--- a/website/sidebars.json
+++ b/website/sidebars.json
@@ -7,7 +7,6 @@
       "tutorials/cluster"
     ],
     "Tutorials": [
-      "tutorials/tutorial-batch",
       "tutorials/tutorial-msq-extern",
       "tutorials/tutorial-kafka",
       "tutorials/tutorial-batch-hadoop",
@@ -20,10 +19,9 @@
       "tutorials/tutorial-delete-data",
       "tutorials/tutorial-ingestion-spec",
       "tutorials/tutorial-transform-spec",
+      "tutorials/tutorial-msq-convert-spec",
       "tutorials/docker",
       "tutorials/tutorial-kerberos-hadoop",
-      "tutorials/tutorial-msq-convert-spec",
-      "tutorials/tutorial-jdbc",
       "tutorials/tutorial-sql-query-view",
       "tutorials/tutorial-unnest-arrays",
       "tutorials/tutorial-jupyter-index",
@@ -34,54 +32,55 @@
       "design/architecture",
       "design/segments",
       "design/processes",
-      "dependencies/deep-storage",
-      "dependencies/metadata-storage",
-      "dependencies/zookeeper"
+      "design/deep-storage",
+      "design/metadata-storage",
+      "design/zookeeper"
     ],
     "Ingestion": [
       "ingestion/index",
-      "ingestion/data-formats",
-      "ingestion/data-model",
-      "ingestion/rollup",
-      "ingestion/partitioning",
-      "ingestion/ingestion-spec",
-      "ingestion/schema-design",
-      {
-        "type": "subcategory",
-        "label": "Stream ingestion",
-        "ids": [
-          "development/extensions-core/kafka-ingestion",
-          "development/extensions-core/kafka-supervisor-reference",
-          "development/extensions-core/kafka-supervisor-operations",
-          "development/extensions-core/kinesis-ingestion",
-          "ingestion/standalone-realtime"
-        ]
-      },
       {
         "type": "subcategory",
-        "label": "Batch ingestion",
+        "label": "Ingestion concepts",
         "ids": [
-          "ingestion/native-batch",
-          "ingestion/native-batch-input-sources",
-          "ingestion/migrate-from-firehose",
-          "ingestion/native-batch-firehose",
-          "ingestion/hadoop"
+          "ingestion/data-formats",
+          "ingestion/input-sources",
+          "ingestion/schema-model",
+          "ingestion/rollup",
+          "ingestion/partitioning"
         ]
       },
       {
         "type": "subcategory",
-        "label": "SQL-based ingestion \uD83C\uDD95",
+        "label": "SQL-based batch",
         "ids": [
           "multi-stage-query/index",
           "multi-stage-query/concepts",
-          "multi-stage-query/api",
           "multi-stage-query/security",
           "multi-stage-query/examples",
           "multi-stage-query/reference",
           "multi-stage-query/known-issues"
         ]
       },
-      "ingestion/tasks",
+      {
+        "type": "subcategory",
+        "label": "Streaming",
+        "ids": [
+          "development/extensions-core/kafka-ingestion",
+          "development/extensions-core/kafka-supervisor-reference",
+          "development/extensions-core/kafka-supervisor-operations",
+          "development/extensions-core/kinesis-ingestion"
+        ]
+      },
+      {
+        "type": "subcategory",
+        "label": "Classic batch",
+        "ids": [
+          "ingestion/native-batch",
+          "ingestion/hadoop"
+        ]
+      },
+      "ingestion/ingestion-spec",
+      "ingestion/schema-design",
       "ingestion/faq"
     ],
     "Data management": [
@@ -105,8 +104,6 @@
           "querying/sql-multivalue-string-functions",
           "querying/sql-json-functions",
           "querying/sql-functions",
-          "querying/sql-api",
-          "querying/sql-jdbc",
           "querying/sql-query-context",
           "querying/sql-metadata-tables",
           "querying/sql-translation"
@@ -154,19 +151,25 @@
           "querying/dimensionspecs",
           "querying/aggregations",
           "querying/post-aggregations",
-          "misc/math-expr",
+          "querying/math-expr",
           "querying/having",
           "querying/limitspec",
           "querying/topnmetricspec",
           "querying/sorting-orders",
           "querying/virtual-columns",
-          "development/geo"
+          "querying/geo"
         ]
       }
     ],
+    "API reference":[
+      "api-reference/sql-api",
+      "api-reference/sql-ingestion-api",
+      "api-reference/sql-jdbc",
+      "api-reference/api-reference"
+    ],
     "Configuration": [
       "configuration/index",
-      "development/extensions",
+      "configuration/extensions",
       "configuration/logging"
     ],
     "Operations": [
@@ -181,7 +184,6 @@
           "operations/auth-ldap",
           "operations/password-provider",
           "operations/dynamic-config-provider",
-          "design/auth",
           "operations/tls-support"
         ]
       },
@@ -205,10 +207,10 @@
           "operations/alerts"
         ]
       },
-      "operations/api-reference",
       "operations/high-availability",
       "operations/rolling-updates",
       "operations/rule-configuration",
+      "operations/migrate-from-firehose",
       "operations/other-hadoop",
       {
         "type": "subcategory",
@@ -243,7 +245,7 @@
       "comparisons/druid-vs-redshift",
       "comparisons/druid-vs-spark",
       "comparisons/druid-vs-sql-on-hadoop",
-      "design/auth",
+      "operations/auth",
       "design/broker",
       "design/coordinator",
       "design/historical",
@@ -303,12 +305,13 @@
       "development/extensions-contrib/gce-extensions",
       "development/extensions-contrib/aliyun-oss",
       "development/extensions-contrib/prometheus",
+      "ingestion/native-batch-firehose",
+      "ingestion/native-batch-simple-task",
+      "ingestion/standalone-realtime",
       "operations/kubernetes",
       "querying/hll-old",
       "querying/select-query",
-      "ingestion/native-batch-firehose",
-      "ingestion/native-batch-simple-task",
-      "ingestion/standalone-realtime"
+      "tutorials/tutorial-batch"
     ]
   }
 }

