You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@drill.apache.org by dz...@apache.org on 2020/12/10 04:32:09 UTC

[drill-site] branch asf-site updated: Website update

This is an automated email from the ASF dual-hosted git repository.

dzamo pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/drill-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
     new d2cf3f7  Website update
d2cf3f7 is described below

commit d2cf3f739d1d7b62f766664c5f6d1a8c8d85b198
Author: James Turton <ja...@somecomputer.xyz>
AuthorDate: Thu Dec 10 06:31:18 2020 +0200

    Website update
---
 README.md                                          |    2 +-
 apacheASF/index.html                               |    2 +-
 architecture/index.html                            |    2 +-
 blog/2014/11/19/sql-on-mongodb/index.html          |  210 +-
 blog/2014/12/02/drill-top-level-project/index.html |   26 +-
 .../apache-drill-qa-panelist-spotlight/index.html  |   15 +-
 blog/2014/12/16/whats-coming-in-2015/index.html    |  141 +-
 blog/2014/12/23/drill-0.7-released/index.html      |   20 +-
 .../index.html                                     |   75 +-
 .../index.html                                     |   10 +-
 blog/2015/03/31/drill-0.8-released/index.html      |   24 +-
 .../index.html                                     |   12 +-
 blog/2015/05/04/drill-0.9-released/index.html      |   39 +-
 blog/2015/05/19/drill-1.0-released/index.html      |   38 +-
 .../index.html                                     |   32 +-
 blog/2015/07/05/drill-1.1-released/index.html      |   49 +-
 .../23/drill-tutorial-at-nosql-now-2015/index.html |    6 +-
 blog/2015/10/16/drill-1.2-released/index.html      |   25 +-
 blog/2015/11/23/drill-1.3-released/index.html      |   40 +-
 blog/2015/12/14/drill-1.4-released/index.html      |   57 +-
 blog/2016/02/16/drill-1.5-released/index.html      |   16 +-
 blog/2016/03/16/drill-1.6-released/index.html      |    9 +-
 blog/2016/06/28/drill-1.7-released/index.html      |   14 +-
 blog/2016/08/30/drill-1.8-released/index.html      |   18 +-
 blog/2016/11/29/drill-1.9-released/index.html      |   15 +-
 blog/2017/03/15/drill-1.10-released/index.html     |   20 +-
 blog/2017/07/31/drill-1.11-released/index.html     |   60 +-
 blog/2017/12/15/drill-1.12-released/index.html     |   78 +-
 .../09/running-sql-queries-on-amazon-s3/index.html |   56 +-
 blog/2018/03/18/drill-1.13-released/index.html     |   23 +-
 blog/2018/08/05/drill-1.14-released/index.html     |   22 +-
 blog/2018/10/16/drill-developer-day/index.html     |    3 +-
 blog/2018/10/16/drill-user-meetup/index.html       |    3 +-
 blog/2018/12/31/drill-1.15-released/index.html     |   16 +-
 blog/2019/05/02/drill-1.16-released/index.html     |   29 +-
 blog/2019/05/02/drill-user-meetup/index.html       |    3 +-
 blog/2019/12/26/drill-1.17-released/index.html     |   32 +-
 .../09/04/drill-1.18-released}/index.html          |   40 +-
 .../09/05/drill-1.18-released}/index.html          |   24 +-
 blog/index.html                                    |   12 +-
 community-resources/index.html                     |   42 +-
 data/index.html                                    |  114 +-
 docs/011-running-drill-on-docker.md                |   91 +
 .../070-configuring-user-security.md               |    4 +-
 docs/about-sql-function-examples/index.html        |   83 +-
 docs/about-the-mapr-sandbox/index.html             |   75 +-
 .../index.html                                     |   76 +-
 docs/adding-custom-functions-to-drill/index.html   |   72 +-
 .../aggregate-and-aggregate-statistical/index.html |  846 +-
 docs/aggregate-window-functions/index.html         |  274 +-
 docs/alter-system/index.html                       |  175 +-
 docs/analyze-table-compute-statistics/index.html   |  491 +-
 docs/analyze-table-refresh-metadata/index.html     |  158 +-
 .../index.html                                     |  503 +-
 docs/analyzing-highly-dynamic-datasets/index.html  |  289 +-
 docs/analyzing-social-media/index.html             |  323 +-
 .../analyzing-the-yelp-academic-dataset/index.html |  371 +-
 docs/apache-drill-0-4-0-release-notes/index.html   |   95 +-
 docs/apache-drill-0-5-0-release-notes/index.html   |   89 +-
 docs/apache-drill-0-6-0-release-notes/index.html   |   94 +-
 docs/apache-drill-0-7-0-release-notes/index.html   |  151 +-
 docs/apache-drill-0-8-0-release-notes/index.html   |   93 +-
 docs/apache-drill-0-9-0-release-notes/index.html   |  108 +-
 docs/apache-drill-1-0-0-release-notes/index.html   |  552 +-
 docs/apache-drill-1-1-0-release-notes/index.html   |  413 +-
 docs/apache-drill-1-10-0-release-notes/index.html  |  344 +-
 docs/apache-drill-1-11-0-release-notes/index.html  |  360 +-
 docs/apache-drill-1-12-0-release-notes/index.html  |  474 +-
 docs/apache-drill-1-13-0-release-notes/index.html  |  360 +-
 docs/apache-drill-1-14-0-release-notes/index.html  |  634 +-
 docs/apache-drill-1-15-0-release-notes/index.html  |  557 +-
 docs/apache-drill-1-16-0-release-notes/index.html  |  573 +-
 docs/apache-drill-1-17-0-release-notes/index.html  |  503 +-
 .../index.html                                     |  604 +-
 docs/apache-drill-1-2-0-release-notes/index.html   |  580 +-
 docs/apache-drill-1-3-0-release-notes/index.html   |  200 +-
 docs/apache-drill-1-4-0-release-notes/index.html   |  153 +-
 docs/apache-drill-1-5-0-release-notes/index.html   |  205 +-
 docs/apache-drill-1-6-0-release-notes/index.html   |  177 +-
 docs/apache-drill-1-7-0-release-notes/index.html   |  207 +-
 docs/apache-drill-1-8-0-release-notes/index.html   |  229 +-
 docs/apache-drill-1-9-0-release-notes/index.html   |  234 +-
 .../index.html                                     |  224 +-
 docs/apache-drill-contribution-ideas/index.html    |  202 +-
 .../index.html                                     |  105 +-
 docs/appendix-a-release-note-issues/index.html     |  263 +-
 docs/appendix-b-drill-env-sh-settings/index.html   |   81 +-
 docs/appendix-c-troubleshooting/index.html         |  165 +-
 .../index.html                                     |  103 +-
 docs/architecture-introduction/index.html          |   82 +-
 docs/architecture/index.html                       |   72 +-
 docs/asynchronous-parquet-reader/index.html        |  376 +-
 docs/attachments/intellij-idea-settings.jar        |  Bin 1956 -> 0 bytes
 docs/azure-blob-storage-plugin/index.html          |  184 +-
 docs/browsing-data-and-defining-views/index.html   |  157 +-
 docs/case/index.html                               |  107 +-
 docs/choosing-a-storage-format/index.html          |   75 +-
 docs/compiling-drill-from-source/index.html        |  138 +-
 docs/configuration-options-introduction/index.html |  989 +-
 docs/configuration-options/index.html              |   72 +-
 docs/configuration-reference/index.html            |  279 +-
 docs/configure-drill-introduction/index.html       |   84 +-
 docs/configure-drill/index.html                    |   72 +-
 .../index.html                                     |   94 +-
 docs/configuring-a-multitenant-cluster/index.html  |   72 +-
 .../index.html                                     |  234 +-
 .../index.html                                     |  132 +-
 docs/configuring-drill-memory/index.html           |  207 +-
 .../index.html                                     |  269 +-
 docs/configuring-inbound-impersonation/index.html  |  144 +-
 docs/configuring-index-planning/index.html         |  261 +-
 docs/configuring-jreport-with-drill/index.html     |  141 +-
 docs/configuring-kerberos-security/index.html      |  458 +-
 docs/configuring-multitenant-resources/index.html  |   99 +-
 docs/configuring-odbc-on-linux/index.html          |  250 +-
 docs/configuring-odbc-on-mac-os-x/index.html       |  228 +-
 docs/configuring-odbc-on-windows/index.html        |  136 +-
 docs/configuring-odbc/index.html                   |   72 +-
 docs/configuring-plain-security/index.html         |  146 +-
 .../index.html                                     |  102 +-
 docs/configuring-ssl-tls-for-encryption/index.html |  622 +-
 docs/configuring-storage-plugins/index.html        |  180 +-
 docs/configuring-the-drill-shell/index.html        | 1011 +-
 .../index.html                                     |  172 +-
 .../index.html                                     |  312 +-
 docs/configuring-user-impersonation/index.html     |  254 +-
 .../index.html                                     |  572 +-
 docs/connect-a-data-source-introduction/index.html |   80 +-
 docs/connect-a-data-source/index.html              |   85 +-
 docs/connecting-drill-explorer-to-data/index.html  |  144 +-
 docs/contribute-to-drill/index.html                |   72 +-
 .../index.html                                     |   73 +-
 docs/core-modules/index.html                       |   95 +-
 docs/create-function-using-jar/index.html          |  119 +-
 docs/create-or-replace-schema/index.html           | 1024 +-
 docs/create-table-as-ctas/index.html               |  276 +-
 docs/create-temporary-table-as-cttas/index.html    |  211 +-
 docs/create-view/index.html                        |  275 +-
 docs/creating-a-basic-drill-cluster/index.html     |  322 +-
 docs/creating-custom-authenticators/index.html     |  242 +-
 docs/cryptography-functions/index.html             |  191 +-
 docs/custom-function-interfaces/index.html         |  166 +-
 .../index.html                                     |  190 +-
 docs/data-sources-and-file-formats/index.html      |   99 +-
 docs/data-type-conversion/index.html               | 1716 ++--
 docs/data-type-functions/index.html                |  146 +-
 docs/data-types/index.html                         |   72 +-
 docs/date-time-and-timestamp/index.html            |  271 +-
 docs/date-time-functions-and-arithmetic/index.html | 1256 +--
 docs/deploying-and-using-a-hive-udf/index.html     |  159 +-
 docs/describe/index.html                           |  317 +-
 docs/design-docs/index.html                        |   72 +-
 docs/designing-indexes-for-your-queries/index.html |  399 +-
 .../index.html                                     |   87 +-
 docs/develop-custom-functions/index.html           |   74 +-
 docs/develop-drill/index.html                      |   72 +-
 docs/developer-information/index.html              |   72 +-
 docs/developing-a-simple-function/index.html       |  149 +-
 docs/developing-an-aggregate-function/index.html   |  151 +-
 docs/distributed-mode-prerequisites/index.html     |   88 +-
 docs/drill-default-input-format/index.html         |  122 +-
 docs/drill-explorer-introduction/index.html        |   75 +-
 docs/drill-iceberg-metastore/index.html            |  104 +-
 docs/drill-in-10-minutes/index.html                |  254 +-
 docs/drill-introduction/index.html                 |  567 +-
 docs/drill-metastore/index.html                    |   72 +-
 docs/drill-on-yarn-command-line-tool/index.html    |  174 +-
 docs/drill-on-yarn-introduction/index.html         |  111 +-
 docs/drill-on-yarn/index.html                      |   72 +-
 docs/drill-plan-syntax/index.html                  |   77 +-
 docs/drill-query-execution/index.html              |  102 +-
 docs/drop-function-using-jar/index.html            |  114 +-
 docs/drop-table/index.html                         |  410 +-
 docs/drop-view/index.html                          |  191 +-
 .../index.html                                     |  192 +-
 docs/dynamic-udfs/index.html                       |  321 +-
 docs/ecosystem/index.html                          |   72 +-
 docs/embedded-mode-prerequisites/index.html        |   90 +-
 docs/enabling-query-queuing/index.html             |  127 +-
 docs/enabling-web-ui-security/index.html           |  102 +-
 docs/enron-emails/index.html                       |   76 +-
 docs/error-messages/index.html                     |  121 +-
 .../index.html                                     |  174 +-
 .../index.html                                     |  236 +-
 docs/explain/index.html                            |  199 +-
 docs/file-system-storage-plugin/index.html         |  217 +-
 docs/flatten/index.html                            |  157 +-
 docs/from-clause/index.html                        |  355 +-
 docs/functions-for-handling-nulls/index.html       |  115 +-
 docs/getting-started/index.html                    |   72 +-
 docs/getting-to-know-the-drill-sandbox/index.html  |  198 +-
 docs/{explain => gis-functions}/index.html         |  396 +-
 docs/global-query-list/index.html                  |  101 +-
 docs/group-by-clause/index.html                    |  153 +-
 .../index.html                                     |   85 +-
 docs/handling-different-data-types/index.html      |  113 +-
 docs/having-clause/index.html                      |  130 +-
 docs/hbase-storage-plugin/index.html               |   95 +-
 .../index.html                                     |  327 +-
 docs/hive-metadata-caching/index.html              |  140 +-
 docs/hive-storage-plugin/index.html                |  270 +-
 docs/hive-to-drill-data-type-mapping/index.html    |  353 +-
 docs/how-to-partition-data/index.html              |  149 +-
 .../index.html                                     | 1061 ++-
 docs/httpd-format-plugin/index.html                |  458 +-
 .../index.html                                     |  131 +-
 docs/identifying-performance-issues/index.html     |   72 +-
 docs/image-metadata-format-plugin/index.html       | 9796 ++++++++++----------
 docs/index-selection/index.html                    |   87 +-
 docs/index.html                                    |   70 +-
 docs/install-drill-introduction/index.html         |   73 +-
 docs/install-drill/index.html                      |   74 +-
 .../index.html                                     |   72 +-
 docs/installing-drill-in-embedded-mode/index.html  |   75 +-
 .../index.html                                     |   93 +-
 docs/installing-drill-on-the-cluster/index.html    |  108 +-
 docs/installing-drill-on-windows/index.html        |  138 +-
 .../installing-the-apache-drill-sandbox/index.html |  235 +-
 docs/installing-the-driver-on-linux/index.html     |  197 +-
 docs/installing-the-driver-on-mac-os-x/index.html  |  120 +-
 docs/installing-the-driver-on-windows/index.html   |  153 +-
 docs/installing-the-odbc-driver/index.html         |   72 +-
 docs/interfaces-introduction/index.html            |  104 +-
 docs/join-planning-guidelines/index.html           |  113 +-
 docs/json-data-model/index.html                    |  715 +-
 docs/kafka-storage-plugin/index.html               |  392 +-
 docs/kvgen/index.html                              |  344 +-
 docs/lateral-join/index.html                       |  511 +-
 docs/launch-drill-under-yarn/index.html            |  106 +-
 docs/learn-drill-with-the-mapr-sandbox/index.html  |   72 +-
 docs/lesson-1-learn-about-the-data-set/index.html  |  333 +-
 docs/lesson-2-run-queries-with-ansi-sql/index.html |  333 +-
 .../index.html                                     |  339 +-
 docs/lexical-structure/index.html                  |  458 +-
 docs/limit-clause/index.html                       |  177 +-
 docs/log-and-debug-introduction/index.html         |   91 +-
 docs/log-and-debug/index.html                      |   72 +-
 docs/logfile-plugin/index.html                     |  172 +-
 docs/logging-and-tracing/index.html                |  248 +-
 docs/ltsv-format-plugin/index.html                 |  137 +-
 .../index.html                                     |  101 +-
 docs/mapr-db-format/index.html                     |   87 +-
 docs/math-and-trig/index.html                      |  606 +-
 docs/migrating-parquet-data/index.html             |  118 +-
 docs/modify-logback-xml/index.html                 |  142 +-
 docs/modifying-query-planning-options/index.html   |  114 +-
 docs/mongodb-storage-plugin/index.html             |  296 +-
 .../index.html                                     |  103 +-
 docs/monitoring-metrics/index.html                 |  510 +-
 docs/multiple-drill-clusters/index.html            |  126 +-
 docs/nested-data-functions/index.html              |   72 +-
 docs/nested-data-limitations/index.html            |   72 +-
 docs/odbc-configuration-reference/index.html       |  550 +-
 docs/odbc-jdbc-interfaces/index.html               |   74 +-
 docs/offset-clause/index.html                      |  101 +-
 docs/opentsdb-storage-plugin/index.html            |  214 +-
 docs/operators/index.html                          |  393 +-
 .../optimizing-parquet-metadata-reading/index.html |  105 +-
 docs/order-by-clause/index.html                    |  147 +-
 docs/parquet-filter-pushdown/index.html            |  343 +-
 docs/parquet-format/index.html                     |  532 +-
 docs/partition-by-clause/index.html                |  356 +-
 docs/partition-pruning-introduction/index.html     |   93 +-
 docs/partition-pruning/index.html                  |   72 +-
 docs/performance-tuning-introduction/index.html    |   90 +-
 docs/performance-tuning-reference/index.html       |   72 +-
 docs/performance-tuning/index.html                 |   72 +-
 docs/performance/index.html                        |   78 +-
 docs/persistent-configuration-storage/index.html   |  143 +-
 docs/phonetic-functions/index.html                 |  152 +-
 docs/physical-operators/index.html                 |  490 +-
 docs/planning-and-execution-options/index.html     |  251 +-
 docs/plugin-configuration-basics/index.html        |  219 +-
 docs/ports-used-by-drill/index.html                |  145 +-
 docs/project-bylaws/index.html                     |  102 +-
 .../index.html                                     |  134 +-
 docs/query-audit-logging/index.html                |  128 +-
 docs/query-data-introduction/index.html            |  164 +-
 docs/query-data/index.html                         |   72 +-
 docs/query-directory-functions/index.html          |  129 +-
 .../query-plans-and-tuning-introduction/index.html |   75 +-
 docs/query-plans-and-tuning/index.html             |   72 +-
 docs/query-plans/index.html                        |  110 +-
 docs/query-profile-column-descriptions/index.html  |  455 +-
 docs/query-profiles/index.html                     |  300 +-
 docs/query-stages/index.html                       |   73 +-
 .../querying-a-file-system-introduction/index.html |  211 +-
 docs/querying-a-file-system/index.html             |   72 +-
 docs/querying-avro-files/index.html                |   94 +-
 docs/querying-complex-data-introduction/index.html |  103 +-
 docs/querying-complex-data/index.html              |   72 +-
 docs/querying-directories/index.html               |  185 +-
 docs/querying-hbase/index.html                     |  585 +-
 docs/querying-hive/index.html                      |  161 +-
 docs/querying-indexes-introduction/index.html      |   78 +-
 docs/querying-indexes/index.html                   |   72 +-
 docs/querying-json-files/index.html                |  100 +-
 docs/querying-parquet-files/index.html             |  118 +-
 docs/querying-plain-text-files/index.html          |  338 +-
 docs/querying-sequence-files/index.html            |   98 +-
 docs/querying-system-tables/index.html             |  373 +-
 docs/querying-the-information-schema/index.html    |  322 +-
 docs/ranking-window-functions/index.html           |  266 +-
 docs/rdbms-storage-plugin/index.html               |  282 +-
 docs/refresh-table-metadata/index.html             |  395 +-
 docs/release-notes/index.html                      |   77 +-
 docs/repeated-contains/index.html                  |  206 +-
 docs/repeated-count/index.html                     |  107 +-
 docs/reserved-keywords/index.html                  | 3936 ++++----
 docs/reset/index.html                              |  141 +-
 docs/rest-api-introduction/index.html              |  528 +-
 docs/rest-api/index.html                           |   72 +-
 docs/review-the-java-stack-trace/index.html        |  122 +-
 docs/roles-and-privileges/index.html               |   91 +-
 docs/rpc-overview/index.html                       |   87 +-
 docs/running-drill-on-docker/index.html            | 1475 ---
 docs/s3-storage-plugin/index.html                  |  238 +-
 docs/sample-data-donuts/index.html                 |  122 +-
 docs/sample-datasets/index.html                    |   72 +-
 docs/secure-communication-paths/index.html         |  322 +-
 docs/securing-drill-introduction/index.html        |  119 +-
 docs/securing-drill/index.html                     |   72 +-
 docs/select-list/index.html                        |  159 +-
 docs/select/index.html                             |  168 +-
 docs/selecting-flat-data/index.html                |   90 +-
 .../index.html                                     |   90 +-
 docs/selecting-nested-data-for-a-column/index.html |  116 +-
 docs/sequence-files/index.html                     |  116 +-
 docs/set/index.html                                |  177 +-
 docs/show-databases-and-show-schemas/index.html    |  140 +-
 docs/show-files/index.html                         |  148 +-
 docs/show-tables/index.html                        |  273 +-
 .../index.html                                     |  238 +-
 .../index.html                                     |  206 +-
 docs/sql-commands/index.html                       |   72 +-
 docs/sql-conditional-expressions/index.html        |   72 +-
 .../index.html                                     |  655 +-
 docs/sql-extensions/index.html                     |  165 +-
 docs/sql-functions/index.html                      |   81 +-
 docs/sql-reference-introduction/index.html         |   73 +-
 docs/sql-reference/index.html                      |   78 +-
 docs/sql-window-functions-examples/index.html      |  205 +-
 docs/sql-window-functions-introduction/index.html  |  374 +-
 docs/sql-window-functions/index.html               |   74 +-
 docs/start-up-options/index.html                   |  186 +-
 docs/starting-drill-in-distributed-mode/index.html |  131 +-
 .../index.html                                     |  105 +-
 docs/starting-drill-on-windows/index.html          |  152 +-
 docs/starting-the-web-ui/index.html                |  124 +-
 docs/stopping-drill/index.html                     |  129 +-
 docs/storage-plugin-configuration/index.html       |   72 +-
 docs/storage-plugin-registration/index.html        |  127 +-
 docs/string-distance-functions/index.html          |  135 +-
 docs/string-manipulation/index.html                |  947 +-
 .../index.html                                     |  110 +-
 docs/summary/index.html                            |   72 +-
 docs/supported-data-types/index.html               | 1273 +--
 docs/supported-sql-commands/index.html             |  303 +-
 docs/syslog-format-plugin/index.html               |  166 +-
 docs/tableau-examples/index.html                   |  423 +-
 docs/testing-the-odbc-connection/index.html        |  330 +-
 docs/text-files-csv-tsv-psv/index.html             |  520 +-
 docs/throttling/index.html                         |  154 +-
 .../index.html                                     |  291 +-
 docs/troubleshooting/index.html                    |  322 +-
 docs/tutorial-develop-a-simple-function/index.html |  243 +-
 docs/tutorials-introduction/index.html             |  127 +-
 docs/tutorials/index.html                          |   72 +-
 docs/types-of-indexes/index.html                   |  195 +-
 docs/union-set-operator/index.html                 |  117 +-
 .../index.html                                     |  111 +-
 docs/use/index.html                                |  307 +-
 .../index.html                                     |   74 +-
 docs/useful-research/index.html                    |  127 +-
 .../index.html                                     |  156 +-
 .../index.html                                     |  158 +-
 .../index.html                                     |  156 +-
 docs/using-custom-functions-in-queries/index.html  |  135 +-
 docs/using-drill-explorer/index.html               |   72 +-
 docs/using-drill-metastore/index.html              |  972 +-
 .../index.html                                     |  123 +-
 docs/using-drill-with-bi-tools/index.html          |   72 +-
 .../index.html                                     |  158 +-
 .../using-jdbc-with-squirrel-on-windows/index.html |  305 +-
 .../using-jpam-as-the-pam-authenticator/index.html |  151 +-
 .../index.html                                     |  127 +-
 .../index.html                                     |  266 +-
 docs/using-qlik-sense-with-drill/index.html        |  170 +-
 .../index.html                                     |  238 +-
 .../index.html                                     |  130 +-
 docs/using-the-drill-on-yarn-web-ui/index.html     |  155 +-
 docs/using-the-jdbc-driver/index.html              |  183 +-
 .../index.html                                     |  121 +-
 docs/value-vectors/index.html                      |   87 +-
 docs/value-window-functions/index.html             |  237 +-
 docs/verifying-index-use/index.html                |  157 +-
 docs/where-clause/index.html                       |  141 +-
 docs/why-drill/index.html                          |  133 +-
 docs/wikipedia-edit-history/index.html             |  140 +-
 docs/with-clause/index.html                        |  120 +-
 docs/workspaces/index.html                         |  177 +-
 download/index.html                                |   20 +-
 faq/index.html                                     |  169 +-
 feed.xml                                           |  361 +-
 {docs/img => images/docs}/11.png                   |  Bin
 {docs/img => images/docs}/18.png                   |  Bin
 {docs/img => images/docs}/19.png                   |  Bin
 {docs/img => images/docs}/1_vbImport.png           |  Bin
 {docs/img => images/docs}/21.png                   |  Bin
 {docs/img => images/docs}/30.png                   |  Bin
 {docs/img => images/docs}/3_vbNetwork.png          |  Bin
 {docs/img => images/docs}/4.png                    |  Bin
 {docs/img => images/docs}/40.png                   |  Bin
 {docs/img => images/docs}/42.png                   |  Bin
 {docs/img => images/docs}/46.png                   |  Bin
 {docs/img => images/docs}/4_vbMaprSetting.png      |  Bin
 {docs/img => images/docs}/51.png                   |  Bin
 {docs/img => images/docs}/52.png                   |  Bin
 {docs/img => images/docs}/53.png                   |  Bin
 {docs/img => images/docs}/54.png                   |  Bin
 {docs/img => images/docs}/58.png                   |  Bin
 {docs/img => images/docs}/7.png                    |  Bin
 {docs/img => images/docs}/BI_to_Drill_2.png        |  Bin
 {docs/img => images/docs}/DrillWebUI.png           |  Bin
 {docs/img => images/docs}/DrillbitModules.png      |  Bin
 {docs/img => images/docs}/HbaseViewCreation0.png   |  Bin
 {docs/img => images/docs}/HbaseViewDSN.png         |  Bin
 {docs/img => images/docs}/Hbase_Browse.png         |  Bin
 {docs/img => images/docs}/Hive_DSN.png             |  Bin
 {docs/img => images/docs}/ODBC_CustomSQL.png       |  Bin
 {docs/img => images/docs}/ODBC_HbasePreview2.png   |  Bin
 {docs/img => images/docs}/ODBC_HbaseView.png       |  Bin
 {docs/img => images/docs}/ODBC_HiveConnection.png  |  Bin
 {docs/img => images/docs}/ODBC_to_Drillbit.png     |  Bin
 {docs/img => images/docs}/ODBC_to_Quorum.png       |  Bin
 {docs/img => images/docs}/Overview.png             |  Bin
 {docs/img => images/docs}/Parquet_DSN.png          |  Bin
 {docs/img => images/docs}/Parquet_Preview.png      |  Bin
 {docs/img => images/docs}/RegionParquet_table.png  |  Bin
 {docs/img => images/docs}/SelectHbaseView.png      |  Bin
 {docs/img => images/docs}/T10.2_IMG_1.png          |  Bin
 {docs/img => images/docs}/T10.2_IMG_2.png          |  Bin
 {docs/img => images/docs}/T10.2_IMG_3.png          |  Bin
 {docs/img => images/docs}/T10.2_IMG_4.png          |  Bin
 {docs/img => images/docs}/T10.2_IMG_5.png          |  Bin
 {docs/img => images/docs}/T10.2_IMG_6.png          |  Bin
 {docs/img => images/docs}/T10.2_IMG_7.png          |  Bin
 {docs/img => images/docs}/UserAuthProcess.PNG      |  Bin
 {docs/img => images/docs}/UserAuth_ODBC_Driver.png |  Bin
 .../docs}/VoterContributions_hbaseview.png         |  Bin
 {docs/img => images/docs}/client-drillbit-ssl.PNG  |  Bin
 .../docs}/client-encrypt-compatibility.png         |  Bin
 {docs/img => images/docs}/client-phys-plan.png     |  Bin
 {docs/img => images/docs}/connect-list.png         |  Bin
 {docs/img => images/docs}/connect-plugin.png       |  Bin
 {docs/img => images/docs}/csv_no_header.png        |  Bin
 {docs/img => images/docs}/csv_with_comments.png    |  Bin
 {docs/img => images/docs}/csv_with_escape.png      |  Bin
 {docs/img => images/docs}/csv_with_header.png      |  Bin
 {docs/img => images/docs}/ctas-1.png               |  Bin
 {docs/img => images/docs}/ctas-2.png               |  Bin
 {docs/img => images/docs}/custom-sql-query.png     |  Bin
 .../img => images/docs}/data-sources-schemachg.png |  Bin
 {docs/img => images/docs}/data_skew.png            |  Bin
 .../docs}/datasources-json-bracket.png             |  Bin
 {docs/img => images/docs}/datasources-json.png     |  Bin
 {docs/img => images/docs}/doy-envsh-mapping.PNG    |  Bin
 {docs/img => images/docs}/drill-bin.png            |  Bin
 .../docs}/drill-channel-pipeline-with-handlers.png |  Bin
 {docs/img => images/docs}/drill-directory.png      |  Bin
 {docs/img => images/docs}/drill-runtime.png        |  Bin
 {docs/img => images/docs}/drill2.png               |  Bin
 {docs/img => images/docs}/drill_imp_simple.PNG     |  Bin
 {docs/img => images/docs}/edit-custom-sql.png      |  Bin
 {docs/img => images/docs}/ex-operator.png          |  Bin
 {docs/img => images/docs}/example_query.png        |  Bin
 {docs/img => images/docs}/execution-tree.PNG       |  Bin
 {docs/img => images/docs}/explorer-connect.png     |  Bin
 {docs/img => images/docs}/explorer-nation-data.png |  Bin
 {docs/img => images/docs}/explorer-schemas.png     |  Bin
 {docs/img => images/docs}/frag_profile.png         |  Bin
 {docs/img => images/docs}/get2kno_plugin.png       |  Bin
 {docs/img => images/docs}/graph_1.png              |  Bin
 {docs/img => images/docs}/histogram.png            |  Bin
 {docs/img => images/docs}/image_1.png              |  Bin
 {docs/img => images/docs}/image_10.png             |  Bin
 {docs/img => images/docs}/image_11.png             |  Bin
 {docs/img => images/docs}/image_12.png             |  Bin
 {docs/img => images/docs}/image_13.png             |  Bin
 {docs/img => images/docs}/image_14.png             |  Bin
 {docs/img => images/docs}/image_15.png             |  Bin
 {docs/img => images/docs}/image_16.png             |  Bin
 {docs/img => images/docs}/image_17.png             |  Bin
 {docs/img => images/docs}/image_2.png              |  Bin
 {docs/img => images/docs}/image_3.png              |  Bin
 {docs/img => images/docs}/image_4.png              |  Bin
 {docs/img => images/docs}/image_5.png              |  Bin
 {docs/img => images/docs}/image_6.png              |  Bin
 {docs/img => images/docs}/image_7.png              |  Bin
 {docs/img => images/docs}/image_8.png              |  Bin
 {docs/img => images/docs}/image_9.png              |  Bin
 {docs/img => images/docs}/inboundImpersonation.PNG |  Bin
 {docs/img => images/docs}/install-tableau-tdc.png  |  Bin
 {docs/img => images/docs}/install-windows-2-1.png  |  Bin
 .../img => images/docs}/install-windows-2-2-2.png  |  Bin
 {docs/img => images/docs}/install-windows-2-2.png  |  Bin
 {docs/img => images/docs}/install-windows-2.png    |  Bin
 {docs/img => images/docs}/install-windows-3-1.png  |  Bin
 {docs/img => images/docs}/install-windows-3.png    |  Bin
 {docs/img => images/docs}/install-windows-5-1.png  |  Bin
 {docs/img => images/docs}/install-windows-5.png    |  Bin
 {docs/img => images/docs}/install-windows-6-1.png  |  Bin
 {docs/img => images/docs}/install-windows-6.png    |  Bin
 images/docs/issue_count.png                        | 1238 +++
 .../img => images/docs}/jdbc_connection_tries.png  |  Bin
 {docs/img => images/docs}/jreport-addtable.png     |  Bin
 .../img => images/docs}/jreport-catalogbrowser.png |  Bin
 {docs/img => images/docs}/jreport-crosstab.png     |  Bin
 {docs/img => images/docs}/jreport-crosstab2.png    |  Bin
 {docs/img => images/docs}/jreport-crosstab3.png    |  Bin
 {docs/img => images/docs}/jreport-hostsfile.png    |  Bin
 {docs/img => images/docs}/jreport-queryeditor.png  |  Bin
 .../img => images/docs}/jreport-quotequalifier.png |  Bin
 {docs/img => images/docs}/jreport_setenv.png       |  Bin
 {docs/img => images/docs}/json-workaround.png      |  Bin
 .../img => images/docs}/kerberos-auth-process.png  |  Bin
 .../img => images/docs}/kerberos-client-server.png |  Bin
 {docs/img => images/docs}/kerberos-clnt-svr.png    |  Bin
 {docs/img => images/docs}/leaf-frag.png            |  Bin
 {docs/img => images/docs}/list_queries.png         |  Bin
 {docs/img => images/docs}/loginSandBox.png         |  Bin
 {docs/img => images/docs}/maj_frag_block.png       |  Bin
 {docs/img => images/docs}/min-frag.png             |  Bin
 .../docs}/multiple_drill_versions.jpg              |  Bin
 {docs/img => images/docs}/new-data-source.png      |  Bin
 {docs/img => images/docs}/ngram_plugin.png         |  Bin
 {docs/img => images/docs}/ngram_plugin2.png        |  Bin
 {docs/img => images/docs}/odbc-configure1.png      |  Bin
 {docs/img => images/docs}/odbc-configure2.png      |  Bin
 {docs/img => images/docs}/odbc-configure3.png      |  Bin
 {docs/img => images/docs}/odbc-configuretest.png   |  Bin
 {docs/img => images/docs}/odbc-create-as.png       |  Bin
 {docs/img => images/docs}/odbc-define-view.png     |  Bin
 {docs/img => images/docs}/odbc-drivers.png         |  Bin
 .../img => images/docs}/odbc-explorer-connect.png  |  Bin
 .../docs}/odbc-explorer-win-connect.png            |  Bin
 {docs/img => images/docs}/odbc-explorer-win.png    |  Bin
 {docs/img => images/docs}/odbc-mac1.png            |  Bin
 {docs/img => images/docs}/odbc-mac2.png            |  Bin
 {docs/img => images/docs}/odbc-mac3.png            |  Bin
 {docs/img => images/docs}/odbc-mapr-drill-apps.png |  Bin
 {docs/img => images/docs}/odbc-test.png            |  Bin
 {docs/img => images/docs}/odbc-user-dsn.png        |  Bin
 .../img => images/docs}/odbc_data_source_names.png |  Bin
 {docs/img => images/docs}/odbc_login.png           |  Bin
 {docs/img => images/docs}/odbctrace.png            |  Bin
 {docs/img => images/docs}/operator_block.png       |  Bin
 {docs/img => images/docs}/operator_table.png       |  Bin
 {docs/img => images/docs}/operators.png            |  Bin
 {docs/img => images/docs}/other-dbs-2.png          |  Bin
 {docs/img => images/docs}/other-dbs.png            |  Bin
 {docs/img => images/docs}/phys_plan_profile.png    |  Bin
 {docs/img => images/docs}/plain-auth-process.png   |  Bin
 {docs/img => images/docs}/plugin-default.png       |  Bin
 {docs/img => images/docs}/postman-config-body.png  |  Bin
 .../img => images/docs}/postman-config-headers.png |  Bin
 {docs/img => images/docs}/postman-config-http.png  |  Bin
 {docs/img => images/docs}/postman-icon.png         |  Bin
 .../img => images/docs}/postman-query-results.png  |  Bin
 {docs/img => images/docs}/postman-query.png        |  Bin
 {docs/img => images/docs}/query-1.png              |  Bin
 {docs/img => images/docs}/query-2.png              |  Bin
 {docs/img => images/docs}/query-flow-client.png    |  Bin
 {docs/img => images/docs}/query-plan-verify.png    |  Bin
 {docs/img => images/docs}/queryFlow.png            |  Bin
 .../docs}/query_directories_structure.png          |  Bin
 {docs/img => images/docs}/query_profile.png        |  Bin
 {docs/img => images/docs}/query_queuing.png        |  Bin
 {docs/img => images/docs}/queue-threshold.png      |  Bin
 {docs/img => images/docs}/queuing.png              |  Bin
 {docs/img => images/docs}/saiku_admin_screen.png   |  Bin
 .../img => images/docs}/saiku_choose_database.png  |  Bin
 {docs/img => images/docs}/saiku_datasource.png     |  Bin
 {docs/img => images/docs}/saiku_relation.png       |  Bin
 .../docs}/saiku_schema_designer_1.png              |  Bin
 .../docs}/saiku_schema_designer_2.png              |  Bin
 .../docs}/saiku_schema_designer_3.png              |  Bin
 .../docs}/saiku_schema_designer_4.png              |  Bin
 .../docs}/secure-communication-paths.png           |  Bin
 {docs/img => images/docs}/settings.png             |  Bin
 {docs/img => images/docs}/slide-15-638.png         |  Bin
 {docs/img => images/docs}/socialmed1.png           |  Bin
 {docs/img => images/docs}/socialmed10.png          |  Bin
 {docs/img => images/docs}/socialmed11.png          |  Bin
 {docs/img => images/docs}/socialmed12.png          |  Bin
 {docs/img => images/docs}/socialmed13.png          |  Bin
 {docs/img => images/docs}/socialmed2.png           |  Bin
 {docs/img => images/docs}/socialmed3.png           |  Bin
 {docs/img => images/docs}/socialmed4.png           |  Bin
 {docs/img => images/docs}/socialmed5.png           |  Bin
 {docs/img => images/docs}/socialmed6.png           |  Bin
 {docs/img => images/docs}/socialmed7.png           |  Bin
 {docs/img => images/docs}/socialmed8.png           |  Bin
 {docs/img => images/docs}/socialmed9.png           |  Bin
 .../img => images/docs}/spotfire-server-client.png |  Bin
 .../docs}/spotfire-server-configtab.png            |  Bin
 .../docs}/spotfire-server-connectionURL.png        |  Bin
 .../docs}/spotfire-server-database.png             |  Bin
 .../docs}/spotfire-server-datasources-tab.png      |  Bin
 .../docs}/spotfire-server-deployment.png           |  Bin
 .../docs}/spotfire-server-hiveorders.png           |  Bin
 .../docs}/spotfire-server-importconfig.png         |  Bin
 .../docs}/spotfire-server-infodesigner.png         |  Bin
 .../docs}/spotfire-server-infodesigner2.png        |  Bin
 .../docs}/spotfire-server-infolink.png             |  Bin
 {docs/img => images/docs}/spotfire-server-new.png  |  Bin
 .../docs}/spotfire-server-saveconfig.png           |  Bin
 .../docs}/spotfire-server-saveconfig2.png          |  Bin
 .../img => images/docs}/spotfire-server-start.png  |  Bin
 .../docs}/spotfire-server-template.png             |  Bin
 {docs/img => images/docs}/spotfire-server-tss.png  |  Bin
 {docs/img => images/docs}/spotfire_1.png           |  Bin
 {docs/img => images/docs}/spotfire_2.png           |  Bin
 {docs/img => images/docs}/spotfire_3.png           |  Bin
 {docs/img => images/docs}/spotfire_4.png           |  Bin
 {docs/img => images/docs}/spotfire_5.png           |  Bin
 {docs/img => images/docs}/spotfire_6.png           |  Bin
 {docs/img => images/docs}/sqlline1.png             |  Bin
 {docs/img => images/docs}/ssl-security.png         |  Bin
 {docs/img => images/docs}/step2_img1.png           |  Bin
 {docs/img => images/docs}/step2_img2.png           |  Bin
 {docs/img => images/docs}/step3_img1.png           |  Bin
 {docs/img => images/docs}/step4_img1.png           |  Bin
 {docs/img => images/docs}/step4_img2.png           |  Bin
 {docs/img => images/docs}/step4_img3.png           |  Bin
 {docs/img => images/docs}/step5_img1.png           |  Bin
 {docs/img => images/docs}/step5_img2.png           |  Bin
 {docs/img => images/docs}/step5_img3.png           |  Bin
 {docs/img => images/docs}/step5_img4.png           |  Bin
 {docs/img => images/docs}/step5_img5.png           |  Bin
 {docs/img => images/docs}/step6_img1.png           |  Bin
 {docs/img => images/docs}/step6_img2.png           |  Bin
 .../img => images/docs}/storage_plugin_config.png  |  Bin
 {docs/img => images/docs}/storagep-1.png           |  Bin
 {docs/img => images/docs}/storagep-2.png           |  Bin
 {docs/img => images/docs}/storageplugin.png        |  Bin
 {docs/img => images/docs}/student_hive.png         |  Bin
 {docs/img => images/docs}/submit_plan.png          |  Bin
 {docs/img => images/docs}/success.png              |  Bin
 .../img => images/docs}/tableau-desktop-query.png  |  Bin
 {docs/img => images/docs}/tableau-error.png        |  Bin
 {docs/img => images/docs}/tableau-join-key.png     |  Bin
 {docs/img => images/docs}/tableau-odbc-setup-2.png |  Bin
 {docs/img => images/docs}/tableau-odbc-setup.png   |  Bin
 {docs/img => images/docs}/tableau-schemas.png      |  Bin
 .../img => images/docs}/tableau-select-schema.png  |  Bin
 .../docs}/tableau-server-authentication.png        |  Bin
 .../docs}/tableau-server-publish-datasource.png    |  Bin
 .../docs}/tableau-server-publish-datasource2.png   |  Bin
 .../docs}/tableau-server-publish-datasource3.png   |  Bin
 .../docs}/tableau-server-publish1.png              |  Bin
 .../docs}/tableau-server-publish2.png              |  Bin
 .../img => images/docs}/tableau-server-signin1.png |  Bin
 .../img => images/docs}/tableau-server-signin2.png |  Bin
 {docs/img => images/docs}/throttling.png           |  Bin
 {docs/img => images/docs}/ui-export-all.png        |  Bin
 .../img => images/docs}/ui-limit-results-warn.png  |  Bin
 {docs/img => images/docs}/ui-options.PNG           |  Bin
 {docs/img => images/docs}/ui-order-duration.png    |  Bin
 {docs/img => images/docs}/ui-results-page.png      |  Bin
 {docs/img => images/docs}/ui-row-limit.png         |  Bin
 {docs/img => images/docs}/ui-spilled-data.png      |  Bin
 {docs/img => images/docs}/ui-submit-popup.png      |  Bin
 {docs/img => images/docs}/ui-turtle-wait.png       |  Bin
 {docs/img => images/docs}/ui-wait-op.png           |  Bin
 {docs/img => images/docs}/user_hops_four.PNG       |  Bin
 .../img => images/docs}/user_hops_joined_view.PNG  |  Bin
 {docs/img => images/docs}/user_hops_no_join.PNG    |  Bin
 {docs/img => images/docs}/value1.png               |  Bin
 {docs/img => images/docs}/value2.png               |  Bin
 {docs/img => images/docs}/value3.png               |  Bin
 {docs/img => images/docs}/value4.png               |  Bin
 {docs/img => images/docs}/value5.png               |  Bin
 {docs/img => images/docs}/value6.png               |  Bin
 {docs/img => images/docs}/value7.png               |  Bin
 {docs/img => images/docs}/vbApplSettings.png       |  Bin
 {docs/img => images/docs}/vbEthernet.png           |  Bin
 {docs/img => images/docs}/vbGenSettings.png        |  Bin
 {docs/img => images/docs}/vbImport.png             |  Bin
 {docs/img => images/docs}/vbMaprSetting.png        |  Bin
 {docs/img => images/docs}/vbNetwork.png            |  Bin
 {docs/img => images/docs}/vbloginSandBox.png       |  Bin
 {docs/img => images/docs}/vis_graph.png            |  Bin
 {docs/img => images/docs}/vmLibrary.png            |  Bin
 {docs/img => images/docs}/vmShare.png              |  Bin
 {docs/img => images/docs}/vmWelcome.png            |  Bin
 {docs/img => images/docs}/web-ui-admin-view.png    |  Bin
 {docs/img => images/docs}/web-ui-export-config.PNG |  Bin
 {docs/img => images/docs}/web-ui-login.png         |  Bin
 {docs/img => images/docs}/web-ui-user-view.png     |  Bin
 {docs/img => images/docs}/web-ui.png               |  Bin
 {docs/img => images/docs}/web_ui_reset_default.PNG |  Bin
 {docs/img => images/docs}/winsettings.png          |  Bin
 {docs/img => images/docs}/winstart.png             |  Bin
 {docs/img => images/docs}/xx-xx-xx.png             |  Bin
 index.html                                         |    6 +-
 mailinglists/index.html                            |   45 +-
 overview/index.html                                |    2 +-
 poweredBy/index.html                               |    2 +-
 redirects.json                                     |    1 +
 search/index.html                                  |    2 +-
 team/index.html                                    |  429 +-
 why/index.html                                     |   50 +-
 713 files changed, 55654 insertions(+), 40931 deletions(-)

diff --git a/README.md b/README.md
index 2201f70..30e496c 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ Please make sure that specific versions of libraries are installed since buildin
 
 # Documentation Guidelines
 
-The documentation pages are placed under `_docs`. You can modify existing .md files, or you can create new .md files to add to the Apache Drill documentation site. Create pull requests to submit your documentation updates. 
+The documentation pages are placed under `_docs`. You can modify existing .md files, or you can create new .md files to add to the Apache Drill documentation site. Create pull requests to submit your documentation updates. The Kramdown MarkDown processor employed by Jekyll supports [a dialect of MarkDown](https://kramdown.gettalong.org/quickref.html) which is a superset of standard MarkDown.
 
 ## Creating New MarkDown Files
 
diff --git a/apacheASF/index.html b/apacheASF/index.html
index 4dc1ceb..812bb4c 100644
--- a/apacheASF/index.html
+++ b/apacheASF/index.html
@@ -140,7 +140,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/architecture/index.html b/architecture/index.html
index a2b5b03..5ab0a21 100644
--- a/architecture/index.html
+++ b/architecture/index.html
@@ -208,7 +208,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2014/11/19/sql-on-mongodb/index.html b/blog/2014/11/19/sql-on-mongodb/index.html
index 4f527cf..6f89b38 100644
--- a/blog/2014/11/19/sql-on-mongodb/index.html
+++ b/blog/2014/11/19/sql-on-mongodb/index.html
@@ -153,141 +153,163 @@
 <p>The instructions are divided into the following subtopics:</p>
 
 <ul>
-<li>Drill and Mongo setup (standalone/replicated/sharded)</li>
-<li>Running queries</li>
-<li>Securely accessing MongoDB</li>
-<li>Optimizations</li>
+  <li>Drill and Mongo setup (standalone/replicated/sharded)</li>
+  <li>Running queries</li>
+  <li>Securely accessing MongoDB</li>
+  <li>Optimizations</li>
 </ul>
 
-<h2 id="drill-and-mongodb-setup-standalone-replicated-sharded">Drill and MongoDB Setup (Standalone/Replicated/Sharded)</h2>
+<h2 id="drill-and-mongodb-setup-standalonereplicatedsharded">Drill and MongoDB Setup (Standalone/Replicated/Sharded)</h2>
 
 <h3 id="standalone">Standalone</h3>
-
 <ul>
-<li>Start <code>mongod</code> process (<a href="http://docs.mongodb.org/manual/installation/">Install MongoDB</a>)</li>
-<li>Start Drill in embedded mode (<a href="https://cwiki.apache.org/confluence/display/DRILL/Installing+Drill+in+Embedded+Mode">Installing Drill in Embedded Mode</a> &amp; <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=44994063">Starting/Stopping Drill</a>) </li>
-<li>Access the Web UI through the local drillbit: <a href="http://localhost:8047/">http://localhost:8047/</a></li>
-<li><p>Enable the Mongo storage plugin and update its configuration:</p>
-<div class="highlight"><pre><code class="language-json" data-lang="json"><span class="p">{</span>
-  <span class="nt">&quot;type&quot;</span><span class="p">:</span> <span class="s2">&quot;mongo&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;connection&quot;</span><span class="p">:</span> <span class="s2">&quot;mongodb://localhost:27017&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;enabled&quot;</span><span class="p">:</span> <span class="kc">true</span>
-<span class="p">}</span>
-</code></pre></div></li>
+  <li>Start <code class="language-plaintext highlighter-rouge">mongod</code> process (<a href="http://docs.mongodb.org/manual/installation/">Install MongoDB</a>)</li>
+  <li>Start Drill in embedded mode (<a href="https://cwiki.apache.org/confluence/display/DRILL/Installing+Drill+in+Embedded+Mode">Installing Drill in Embedded Mode</a> &amp; <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=44994063">Starting/Stopping Drill</a>)</li>
+  <li>Access the Web UI through the local drillbit: <a href="http://localhost:8047/">http://localhost:8047/</a></li>
+  <li>
+    <p>Enable the Mongo storage plugin and update its configuration:</p>
+
+    <div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="w">  </span><span class="p">{</span><span class="w">
+    </span><span class="nl">"type"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongo"</span><span class="p">,</span><span class="w">
+    </span><span class="nl">"connection"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongodb://localhost:27017"</span><span class="p">,</span><span class="w">
+    </span><span class="nl">"enabled"</span><span class="p">:</span><span class="w"> </span><span class="kc">true</span><span class="w">
+  </span><span class="p">}</span><span class="w">
+</span></code></pre></div>    </div>
+
+    <p>By default, <code class="language-plaintext highlighter-rouge">mongod</code> listens on port 27017.</p>
+  </li>
 </ul>
 
-<p>By default, <code>mongod</code> listens on port 27017.</p>
-
-<p><img src="/static/sql-on-mongodb/standalone.png" alt="Drill on MongoDB in standalone mode"></p>
+<p><img src="/static/sql-on-mongodb/standalone.png" alt="Drill on MongoDB in standalone mode" /></p>
 
 <h3 id="replica-set">Replica Set</h3>
-
 <ul>
-<li>Start <code>mongod</code> processes in replication mode</li>
-<li>Start Drill in distributed mode (<a href="https://cwiki.apache.org/confluence/display/DRILL/Installing+Drill+in+Distributed+Mode">Installing Drill in Distributed Mode</a> &amp; <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=44994063">Starting/Stopping Drill</a>)</li>
-<li>Access the Web UI through any drillbit: <a href="http://drillbit2:8047">http://drillbit2:8047</a></li>
-<li><p>Enable the Mongo storage plugin and update its configuration:</p>
-<div class="highlight"><pre><code class="language-json" data-lang="json"><span class="p">{</span>
-  <span class="nt">&quot;type&quot;</span><span class="p">:</span> <span class="s2">&quot;mongo&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;connection&quot;</span><span class="p">:</span> <span class="s2">&quot;mongodb://&lt;host1&gt;:&lt;port1&gt;,&lt;host2&gt;:&lt;port2&gt;&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;enabled&quot;</span><span class="p">:</span> <span class="kc">true</span>
-<span class="p">}</span>
-</code></pre></div>
-<p>Where <code>host1</code> and <code>host2</code> are <code>mongod</code> hostnames in the replica set.</p></li>
+  <li>Start <code class="language-plaintext highlighter-rouge">mongod</code> processes in replication mode</li>
+  <li>Start Drill in distributed mode (<a href="https://cwiki.apache.org/confluence/display/DRILL/Installing+Drill+in+Distributed+Mode">Installing Drill in Distributed Mode</a> &amp; <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=44994063">Starting/Stopping Drill</a>)</li>
+  <li>Access the Web UI through any drillbit: <a href="http://drillbit2:8047">http://drillbit2:8047</a></li>
+  <li>
+    <p>Enable the Mongo storage plugin and update its configuration:</p>
+
+    <div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="w">  </span><span class="p">{</span><span class="w">
+    </span><span class="nl">"type"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongo"</span><span class="p">,</span><span class="w">
+    </span><span class="nl">"connection"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongodb://&lt;host1&gt;:&lt;port1&gt;,&lt;host2&gt;:&lt;port2&gt;"</span><span class="p">,</span><span class="w">
+    </span><span class="nl">"enabled"</span><span class="p">:</span><span class="w"> </span><span class="kc">true</span><span class="w">
+  </span><span class="p">}</span><span class="w">
+</span></code></pre></div>    </div>
+
+    <p>Where <code class="language-plaintext highlighter-rouge">host1</code> and <code class="language-plaintext highlighter-rouge">host2</code> are <code class="language-plaintext highlighter-rouge">mongod</code> hostnames in the replica set.</p>
+  </li>
 </ul>
 
-<p><img src="/static/sql-on-mongodb/replicated.png" alt="Drill on MongoDB in replicated mode"></p>
+<p><img src="/static/sql-on-mongodb/replicated.png" alt="Drill on MongoDB in replicated mode" /></p>
 
-<p>In replicated mode, whichever drillbit receives the query connects to the nearest <code>mongod</code> (local <code>mongod</code>) to read the data.</p>
+<p>In replicated mode, whichever drillbit receives the query connects to the nearest <code class="language-plaintext highlighter-rouge">mongod</code> (local <code class="language-plaintext highlighter-rouge">mongod</code>) to read the data.</p>
 
-<h3 id="sharded-sharded-with-replica-set">Sharded/Sharded with Replica Set</h3>
+<h3 id="shardedsharded-with-replica-set">Sharded/Sharded with Replica Set</h3>
 
 <ul>
-<li>Start Mongo processes in sharded mode</li>
-<li>Start Drill in distributed mode (<a href="https://cwiki.apache.org/confluence/display/DRILL/Installing+Drill+in+Distributed+Mode">Installing Drill in Distributed Mode</a> &amp; <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=44994063">Starting/Stopping Drill</a>)</li>
-<li>Access the Web UI through any drillbit: <a href="http://drillbit3:8047">http://drillbit3:8047</a></li>
-<li><p>Enable the Mongo storage plugin and update its configuration:</p>
-<div class="highlight"><pre><code class="language-json" data-lang="json"><span class="p">{</span> 
-  <span class="nt">&quot;type&quot;</span><span class="p">:</span> <span class="s2">&quot;mongo&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;connection&quot;</span><span class="p">:</span> <span class="s2">&quot;mongodb://&lt;host1&gt;:&lt;port1&gt;,&lt;host2&gt;:&lt;port2&gt;&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;enabled&quot;</span><span class="p">:</span> <span class="kc">true</span>
-<span class="p">}</span>
-</code></pre></div>
-<p>Where <code>host1</code> and <code>host2</code> are the <code>mongos</code> hostnames.</p></li>
+  <li>Start Mongo processes in sharded mode</li>
+  <li>Start Drill in distributed mode (<a href="https://cwiki.apache.org/confluence/display/DRILL/Installing+Drill+in+Distributed+Mode">Installing Drill in Distributed Mode</a> &amp; <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=44994063">Starting/Stopping Drill</a>)</li>
+  <li>Access the Web UI through any drillbit: <a href="http://drillbit3:8047">http://drillbit3:8047</a></li>
+  <li>
+    <p>Enable the Mongo storage plugin and update its configuration:</p>
+
+    <div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="w">  </span><span class="p">{</span><span class="w"> 
+    </span><span class="nl">"type"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongo"</span><span class="p">,</span><span class="w">
+    </span><span class="nl">"connection"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongodb://&lt;host1&gt;:&lt;port1&gt;,&lt;host2&gt;:&lt;port2&gt;"</span><span class="p">,</span><span class="w">
+    </span><span class="nl">"enabled"</span><span class="p">:</span><span class="w"> </span><span class="kc">true</span><span class="w">
+  </span><span class="p">}</span><span class="w">
+</span></code></pre></div>    </div>
+
+    <p>Where <code class="language-plaintext highlighter-rouge">host1</code> and <code class="language-plaintext highlighter-rouge">host2</code> are the <code class="language-plaintext highlighter-rouge">mongos</code> hostnames.</p>
+  </li>
 </ul>
 
-<p><img src="/static/sql-on-mongodb/sharded.png" alt="Drill on MongoDB in sharded mode"></p>
+<p><img src="/static/sql-on-mongodb/sharded.png" alt="Drill on MongoDB in sharded mode" /></p>
 
-<p>In sharded mode, drillbit first connects to the <code>mongos</code> server to get the shard information.</p>
+<p>In sharded mode, drillbit first connects to the <code class="language-plaintext highlighter-rouge">mongos</code> server to get the shard information.</p>
 
 <h2 id="endpoint-assignments">Endpoint Assignments</h2>
 
 <p>Drill is designed to maximize data locality:</p>
 
 <ul>
-<li>When drillbits and shards run together on the same machines, each drillbit (endpoint) will read the chunks from the local shard. That is, all the chunks from a shard will be assigned to its local drillbit. This is known as data locality, and is the ideal scenario.</li>
-<li>When all drillbits and shards are running on different machines, chunks will be assigned to drillbits in a round-robin fashion. In this case there is no data locality.</li>
-<li>When some of drillbits and shards are colocated, and some of them are running on different machines, partial data locality is achieved.</li>
+  <li>When drillbits and shards run together on the same machines, each drillbit (endpoint) will read the chunks from the local shard. That is, all the chunks from a shard will be assigned to its local drillbit. This is known as data locality, and is the ideal scenario.</li>
+  <li>When all drillbits and shards are running on different machines, chunks will be assigned to drillbits in a round-robin fashion. In this case there is no data locality.</li>
+  <li>When some of drillbits and shards are colocated, and some of them are running on different machines, partial data locality is achieved.</li>
 </ul>
 
 <h2 id="running-queries">Running Queries</h2>
 
-<p>Here is a simple exercise that provides steps for creating an <code>empinfo</code> collection in an <code>employee</code> database in Mongo that you can query using Drill:</p>
+<p>Here is a simple exercise that provides steps for creating an <code class="language-plaintext highlighter-rouge">empinfo</code> collection in an <code class="language-plaintext highlighter-rouge">employee</code> database in Mongo that you can query using Drill:</p>
 
 <ol>
-<li>Download <a href="http://media.mongodb.org/zips.json">zips.json</a> and the <a href="/static/sql-on-mongodb/empinfo.json">empinfo.json</a> dataset referenced at the end of blog.</li>
-<li><p>Import the zips.json and empinfo.json files into Mongo using the following command:  </p>
-<div class="highlight"><pre><code class="language-bash" data-lang="bash">mongoimport --host localhost --db <span class="nb">test</span> --collection zips &lt; zips.json
-mongoimport --host localhost --db employee --collection empinfo &lt; empinfo.json
-</code></pre></div></li>
-<li><p>Issue the following queries either from sqlline (Drill’s shell) or from the Drill Web UI to get corresponding results from the Mongo collection. </p>
+  <li>Download <a href="http://media.mongodb.org/zips.json">zips.json</a> and the <a href="/static/sql-on-mongodb/empinfo.json">empinfo.json</a> dataset referenced at the end of blog.</li>
+  <li>
+    <p>Import the zips.json and empinfo.json files into Mongo using the following command:</p>
 
-<ul>
-<li>To issue queries from the web UI, open the Drill web UI and go to Query tab. </li>
-<li><p>To issue queries from sqlline, connect to sqlline using the following command: </p>
-<div class="highlight"><pre><code class="language-bash" data-lang="bash">&lt;DRILLHOME&gt;/bin/sqlline -u jdbc:drill:zk<span class="o">=</span>zkhost:2181 -n admin -p admin
-</code></pre></div></li>
-</ul></li>
-<li><p>Queries:</p>
-<div class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">SELECT</span> <span class="n">first_name</span><span class="p">,</span> <span class="n">last_name</span><span class="p">,</span> <span class="n">position_id</span>
-<span class="k">FROM</span> <span class="n">mongo</span><span class="p">.</span><span class="n">employee</span><span class="p">.</span><span class="o">`</span><span class="n">empinfo</span><span class="o">`</span>
-<span class="k">WHERE</span> <span class="n">employee_id</span> <span class="o">=</span> <span class="mi">1107</span> <span class="k">AND</span> <span class="n">position_id</span> <span class="o">=</span> <span class="mi">17</span> <span class="k">AND</span> <span class="n">last_name</span> <span class="o">=</span> <span class="s1">&#39;Yonce&#39;</span><span class="p">;</span>  
-
-<span class="k">SELECT</span> <span class="n">city</span><span class="p">,</span> <span class="k">sum</span><span class="p">(</span><span class="n">pop</span><span class="p">)</span>
-<span class="k">FROM</span> <span class="n">mongo</span><span class="p">.</span><span class="n">test</span><span class="p">.</span><span class="o">`</span><span class="n">zips</span><span class="o">`</span> <span class="n">zipcodes</span>
-<span class="k">WHERE</span> <span class="k">state</span> <span class="k">IS</span> <span class="k">NOT</span> <span class="k">NULL</span> <span class="k">GROUP</span> <span class="k">BY</span> <span class="n">city</span>
-<span class="k">ORDER</span> <span class="k">BY</span> <span class="k">sum</span><span class="p">(</span><span class="n">pop</span><span class="p">)</span> <span class="k">DESC</span> <span class="k">LIMIT</span> <span class="mi">1</span><span class="p">;</span>
-</code></pre></div></li>
+    <div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code> mongoimport <span class="nt">--host</span> localhost <span class="nt">--db</span> <span class="nb">test</span> <span class="nt">--collection</span> zips &lt; zips.json
+ mongoimport <span class="nt">--host</span> localhost <span class="nt">--db</span> employee <span class="nt">--collection</span> empinfo &lt; empinfo.json
+</code></pre></div>    </div>
+  </li>
+  <li>Issue the following queries either from sqlline (Drill’s shell) or from the Drill Web UI to get corresponding results from the Mongo collection.
+    <ul>
+      <li>To issue queries from the web UI, open the Drill web UI and go to Query tab.</li>
+      <li>
+        <p>To issue queries from sqlline, connect to sqlline using the following command:</p>
+
+        <div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   &lt;DRILLHOME&gt;/bin/sqlline <span class="nt">-u</span> jdbc:drill:zk<span class="o">=</span>zkhost:2181 <span class="nt">-n</span> admin <span class="nt">-p</span> admin
+</code></pre></div>        </div>
+      </li>
+    </ul>
+  </li>
+  <li>
+    <p>Queries:</p>
+
+    <div class="language-sql highlighter-rouge"><div class="highlight"><pre class="highlight"><code> <span class="k">SELECT</span> <span class="n">first_name</span><span class="p">,</span> <span class="n">last_name</span><span class="p">,</span> <span class="n">position_id</span>
+ <span class="k">FROM</span> <span class="n">mongo</span><span class="p">.</span><span class="n">employee</span><span class="p">.</span><span class="nv">`empinfo`</span>
+ <span class="k">WHERE</span> <span class="n">employee_id</span> <span class="o">=</span> <span class="mi">1107</span> <span class="k">AND</span> <span class="n">position_id</span> <span class="o">=</span> <span class="mi">17</span> <span class="k">AND</span> <span class="n">last_name</span> <span class="o">=</span> <span class="s1">'Yonce'</span><span class="p">;</span>  
+    
+ <span class="k">SELECT</span> <span class="n">city</span><span class="p">,</span> <span class="k">sum</span><span class="p">(</span><span class="n">pop</span><span class="p">)</span>
+ <span class="k">FROM</span> <span class="n">mongo</span><span class="p">.</span><span class="n">test</span><span class="p">.</span><span class="nv">`zips`</span> <span class="n">zipcodes</span>
+ <span class="k">WHERE</span> <span class="k">state</span> <span class="k">IS</span> <span class="k">NOT</span> <span class="k">NULL</span> <span class="k">GROUP</span> <span class="k">BY</span> <span class="n">city</span>
+ <span class="k">ORDER</span> <span class="k">BY</span> <span class="k">sum</span><span class="p">(</span><span class="n">pop</span><span class="p">)</span> <span class="k">DESC</span> <span class="k">LIMIT</span> <span class="mi">1</span><span class="p">;</span>
+</code></pre></div>    </div>
+  </li>
 </ol>
 
-<p><em>Note</em>: If a field contains a mixture of different data types across different records, such as both int and decimal values, then queries fail unless <code>store.mongo.all_text_mode = true</code> and aggregations fail in that case. For more information refer to <a href="https://issues.apache.org/jira/browse/DRILL-1475">DRILL-1475</a> and <a href="https://issues.apache.org/jira/browse/DRILL-1460">DRILL-1460</a>.</p>
+<p><em>Note</em>: If a field contains a mixture of different data types across different records, such as both int and decimal values, then queries fail unless <code class="language-plaintext highlighter-rouge">store.mongo.all_text_mode = true</code> and aggregations fail in that case. For more information refer to <a href="https://issues.apache.org/jira/browse/DRILL-1475">DRILL-1475</a> and <a href="https://issues.apache.org/jira/browse/DRILL-1460">DRILL-1460</a>.</p>
 
-<p>To set <code>store.mongo.all_text_mode = true</code>, execute the following command in sqlline:</p>
-<div class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">alter</span> <span class="k">session</span> <span class="k">set</span> <span class="n">store</span><span class="p">.</span><span class="n">mongo</span><span class="p">.</span><span class="n">all_text_mode</span> <span class="o">=</span> <span class="k">true</span>
-</code></pre></div>
-<h2 id="securely-accessing-mongodb">Securely Accessing MongoDB</h2>
+<p>To set <code class="language-plaintext highlighter-rouge">store.mongo.all_text_mode = true</code>, execute the following command in sqlline:</p>
+
+<div class="language-sql highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">alter</span> <span class="k">session</span> <span class="k">set</span> <span class="n">store</span><span class="p">.</span><span class="n">mongo</span><span class="p">.</span><span class="n">all_text_mode</span> <span class="o">=</span> <span class="k">true</span>
+</code></pre></div></div>
 
+<h2 id="securely-accessing-mongodb">Securely Accessing MongoDB</h2>
 <p>Create two databases, emp and zips. For each database, create a user with read privileges. As an example, for the zips database, create a user “apache” with read privileges. For the emp database, create a user “drill” with read privileges.</p>
 
 <p>The apache user will be able to query the zips database by using the following storage plugin configuration:</p>
-<div class="highlight"><pre><code class="language-json" data-lang="json"><span class="p">{</span> 
-  <span class="nt">&quot;type&quot;</span><span class="p">:</span> <span class="s2">&quot;mongo&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;connection&quot;</span><span class="p">:</span> <span class="s2">&quot;mongodb://apache:apache@localhost:27017/zips&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;enabled&quot;</span><span class="p">:</span> <span class="kc">true</span>
-<span class="p">}</span>
-</code></pre></div>
-<p>The <code>drill</code> user will be able to query the <code>emp</code> database by using the following storage plugin configuration:</p>
-<div class="highlight"><pre><code class="language-json" data-lang="json"><span class="p">{</span> 
-  <span class="nt">&quot;type&quot;</span><span class="p">:</span> <span class="s2">&quot;mongo&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;connection&quot;</span><span class="p">:</span> <span class="s2">&quot;mongodb://drill:drill@localhost:27017/emp&quot;</span><span class="p">,</span>
-  <span class="nt">&quot;enabled&quot;</span><span class="p">:</span> <span class="kc">true</span> 
-<span class="p">}</span>
-</code></pre></div>
+
+<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span><span class="w"> 
+  </span><span class="nl">"type"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongo"</span><span class="p">,</span><span class="w">
+  </span><span class="nl">"connection"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongodb://apache:apache@localhost:27017/zips"</span><span class="p">,</span><span class="w">
+  </span><span class="nl">"enabled"</span><span class="p">:</span><span class="w"> </span><span class="kc">true</span><span class="w">
+</span><span class="p">}</span><span class="w">
+</span></code></pre></div></div>
+
+<p>The <code class="language-plaintext highlighter-rouge">drill</code> user will be able to query the <code class="language-plaintext highlighter-rouge">emp</code> database by using the following storage plugin configuration:</p>
+
+<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span><span class="w"> 
+  </span><span class="nl">"type"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongo"</span><span class="p">,</span><span class="w">
+  </span><span class="nl">"connection"</span><span class="p">:</span><span class="w"> </span><span class="s2">"mongodb://drill:drill@localhost:27017/emp"</span><span class="p">,</span><span class="w">
+  </span><span class="nl">"enabled"</span><span class="p">:</span><span class="w"> </span><span class="kc">true</span><span class="w"> 
+</span><span class="p">}</span><span class="w">
+</span></code></pre></div></div>
+
 <p><em>Note</em>: The security patch may be included in next release. Check <a href="https://issues.apache.org/jira/browse/DRILL-1502">DRILL-1502</a> for status.</p>
 
 <h2 id="optimizations">Optimizations</h2>
-
-<p>The MongoDB storage plugin supports predicate pushdown and projection pushdown. As of now, predicate pushdown is implemented for the following filters: <code>&gt;</code>, <code>&gt;=</code>, <code>&lt;</code>, <code>&lt;=</code>, <code>==</code>, <code>!=</code>, <code>isNull</code> and <code>isNotNull</code>.</p>
+<p>The MongoDB storage plugin supports predicate pushdown and projection pushdown. As of now, predicate pushdown is implemented for the following filters: <code class="language-plaintext highlighter-rouge">&gt;</code>, <code class="language-plaintext highlighter-rouge">&gt;=</code>, <code class="language-plaintext highlighter-rouge">&lt;</code>, <code class="language-plaintext highlighter-rouge">&lt;=</code>, <code class="language-plaintext highlighter-rouge">==</code>, <code class="lang [...]
 
 <p>We are excited about the release of the MongoDB storage plugin, and we believe that Drill is the perfect SQL query tool for MongoDB.</p>
 
@@ -313,7 +335,7 @@ mongoimport --host localhost --db employee --collection empinfo &lt; empinfo.jso
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2014/12/02/drill-top-level-project/index.html b/blog/2014/12/02/drill-top-level-project/index.html
index 8ba56d2..fac407e 100644
--- a/blog/2014/12/02/drill-top-level-project/index.html
+++ b/blog/2014/12/02/drill-top-level-project/index.html
@@ -137,7 +137,7 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>The Apache Software Foundation has just announced that it has promoted Drill to a top-level project at Apache, similar to other well-known projects like Apache Hadoop and httpd (the world&#39;s most popular Web server). This marks a significant accomplishment for the Drill community, and I wanted to personally thank everyone who has contributed to the project. It takes many people, and countless hours, to develop something as complex and innovative as Drill.</p>
+    <p>The Apache Software Foundation has just announced that it has promoted Drill to a top-level project at Apache, similar to other well-known projects like Apache Hadoop and httpd (the world’s most popular Web server). This marks a significant accomplishment for the Drill community, and I wanted to personally thank everyone who has contributed to the project. It takes many people, and countless hours, to develop something as complex and innovative as Drill.</p>
 
 <p>In this post I wanted to reflect on the past and future of Drill.</p>
 
@@ -149,34 +149,34 @@
 
 <p><strong>Applications</strong>: In previous decades, software development was a carefully orchestrated and planned process. The release cycles were often measured in years, and upgrades were infrequent. Today, Web and mobile applications are developed in a much more iterative fashion. The release cycles are measured in days or weeks, and upgrades are a non-issue. (What version of Salesforce.com or Google Maps are you using?)</p>
 
-<p><strong>Data</strong>: In previous decades, data was measured in MBs or GBs, and it was highly structured and denormalized. Today&#39;s data is often measured in TBs or PBs, and it tends to be multi-structured — a combination of unstructured, semi-structured and structured. The data comes from many different sources, including a variety of applications, devices and services, and its structure changes much more frequently.</p>
+<p><strong>Data</strong>: In previous decades, data was measured in MBs or GBs, and it was highly structured and denormalized. Today’s data is often measured in TBs or PBs, and it tends to be multi-structured — a combination of unstructured, semi-structured and structured. The data comes from many different sources, including a variety of applications, devices and services, and its structure changes much more frequently.</p>
 
 <h3 id="a-new-generation-of-datastores">A New Generation of Datastores</h3>
 
-<p>The relational database, which was invented in 1970, was not designed for these new processes and data volumes and structures. As a result, a new generation of datastores has emerged, including HDFS, NoSQL (HBase, MongoDB, etc.) and search (Elasticsearch, Solr).  These systems are schema-free (also known as &quot;dynamic schema&quot;). Applications, as opposed to DBAs, control the data structure, enabling more agility and flexibility. For example, an application developer can independ [...]
+<p>The relational database, which was invented in 1970, was not designed for these new processes and data volumes and structures. As a result, a new generation of datastores has emerged, including HDFS, NoSQL (HBase, MongoDB, etc.) and search (Elasticsearch, Solr).  These systems are schema-free (also known as “dynamic schema”). Applications, as opposed to DBAs, control the data structure, enabling more agility and flexibility. For example, an application developer can independently evol [...]
 
 <h2 id="the-need-for-a-new-query-engine">The Need for a New Query Engine</h2>
 
-<p>With data increasingly being stored in schema-free datastores (HDFS, HBase, MongoDB, etc.) and a variety of cloud services, users need a way to explore and analyze this data, and a way to visualize it with BI tools (reports, dashboards, etc.). In 2012 we decided to embark on a journey to create the world&#39;s next-generation SQL engine. We had several high-level requirements in mind:</p>
+<p>With data increasingly being stored in schema-free datastores (HDFS, HBase, MongoDB, etc.) and a variety of cloud services, users need a way to explore and analyze this data, and a way to visualize it with BI tools (reports, dashboards, etc.). In 2012 we decided to embark on a journey to create the world’s next-generation SQL engine. We had several high-level requirements in mind:</p>
 
 <ul>
-<li><strong>A schema-free data model.</strong> Schema-free datastores (HDFS, NoSQL, search) need a schema-free SQL engine. These datastores became popular for a reason, and we shouldn&#39;t expect organizations to sacrifice those advantages in order to enjoy SQL-based analytics and BI. Today&#39;s organizations need agility and flexibility to cope with the volume, variety and velocity associated with modern applications and data.<br></li>
-<li><strong>A standalone query engine that supports multiple data sources.</strong> Most companies now use a variety of best-of-breed datastores and services to store data. This is true not just for large Global 2000 companies, but also for small startups. For example, it is not uncommon for a startup to have data in MySQL, MongoDB, HBase and HDFS, as well as a variety of online services. ETL was hard even 10 years ago when data was static and 100x smaller than it is today, and in today& [...]
-<li><strong>Ease of use.</strong> The SQL engine can&#39;t be hard to setup and use. Analysts and developers should be able to download and use it without deploying any complex infrastructure such as Hadoop.</li>
-<li><strong>Scalability and performance.</strong> The SQL engine must support interactive queries. It can&#39;t be batch-oriented like Hive. In addition, it must be able to scale linearly from a small laptop or virtual machine to a large cluster with hundreds or thousands of powerful servers.</li>
+  <li><strong>A schema-free data model.</strong> Schema-free datastores (HDFS, NoSQL, search) need a schema-free SQL engine. These datastores became popular for a reason, and we shouldn’t expect organizations to sacrifice those advantages in order to enjoy SQL-based analytics and BI. Today’s organizations need agility and flexibility to cope with the volume, variety and velocity associated with modern applications and data.</li>
+  <li><strong>A standalone query engine that supports multiple data sources.</strong> Most companies now use a variety of best-of-breed datastores and services to store data. This is true not just for large Global 2000 companies, but also for small startups. For example, it is not uncommon for a startup to have data in MySQL, MongoDB, HBase and HDFS, as well as a variety of online services. ETL was hard even 10 years ago when data was static and 100x smaller than it is today, and in toda [...]
+  <li><strong>Ease of use.</strong> The SQL engine can’t be hard to setup and use. Analysts and developers should be able to download and use it without deploying any complex infrastructure such as Hadoop.</li>
+  <li><strong>Scalability and performance.</strong> The SQL engine must support interactive queries. It can’t be batch-oriented like Hive. In addition, it must be able to scale linearly from a small laptop or virtual machine to a large cluster with hundreds or thousands of powerful servers.</li>
 </ul>
 
-<p>With these requirements in mind, we decided to incubate a new project in 2012 in the Apache Software Foundation so that a community of vendors and developers could come together and develop the technology. (One little known fact is that the name &quot;Drill&quot; was actually suggested by Google engineers due to its inspiration from Google&#39;s Dremel execution engine.)</p>
+<p>With these requirements in mind, we decided to incubate a new project in 2012 in the Apache Software Foundation so that a community of vendors and developers could come together and develop the technology. (One little known fact is that the name “Drill” was actually suggested by Google engineers due to its inspiration from Google’s Dremel execution engine.)</p>
 
 <p>After almost two years of research and development, we released Drill 0.4 in August, and continued with monthly releases since then.</p>
 
-<h2 id="whats-next">What&#39;s Next</h2>
+<h2 id="whats-next">What’s Next</h2>
 
-<p>Graduating to a top-level project is a significant milestone, but it&#39;s really just the beginning of the journey. In fact, we&#39;re currently wrapping up Drill 0.7, which includes hundreds of fixes and enhancements, and we expect to release that in the next couple weeks.</p>
+<p>Graduating to a top-level project is a significant milestone, but it’s really just the beginning of the journey. In fact, we’re currently wrapping up Drill 0.7, which includes hundreds of fixes and enhancements, and we expect to release that in the next couple weeks.</p>
 
 <p>Drill is currently being used by dozens of organizations, ranging from small startups to some of the largest Fortune 100s. These organizations are already gaining tremendous business value with Drill. As we march towards a 1.0 release early next year, these organizations are helping us shape the project and ensure that it meets the needs of a broad range of organizations as well as users (business analysts, technical analysts, data scientists and application developers). I would like  [...]
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Tomer Shiran</p>
 
   </article>
@@ -201,7 +201,7 @@ Tomer Shiran</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2014/12/11/apache-drill-qa-panelist-spotlight/index.html b/blog/2014/12/11/apache-drill-qa-panelist-spotlight/index.html
index 6bd0e4c..69db79f 100644
--- a/blog/2014/12/11/apache-drill-qa-panelist-spotlight/index.html
+++ b/blog/2014/12/11/apache-drill-qa-panelist-spotlight/index.html
@@ -153,35 +153,30 @@
     <span class="_date_format">MM-DD-YYYY</span>
 </a></p>
 
-<p>Hadoop has always been a powerful platform, but it is even more so with the release of Apache Drill, a valuable technology for self-service data exploration on big data. For BI users, this is really exciting news. </p>
+<p>Hadoop has always been a powerful platform, but it is even more so with the release of Apache Drill, a valuable technology for self-service data exploration on big data. For BI users, this is really exciting news.</p>
 
-<p>With Apache Drill, you can immediately query complex data in native formats, such as schema-less data, nested data, and data with rapidly-evolving schemas. And with analytic tools likes Tableau, you can easily create queries, build dashboards and explore data. </p>
+<p>With Apache Drill, you can immediately query complex data in native formats, such as schema-less data, nested data, and data with rapidly-evolving schemas. And with analytic tools likes Tableau, you can easily create queries, build dashboards and explore data.</p>
 
-<p>Want to learn how to leverage Apache Drill in order to get better analytical insights? </p>
+<p>Want to learn how to leverage Apache Drill in order to get better analytical insights?</p>
 
 <p><strong>Join us on Twitter</strong> for a one-hour, live SQL-on-Hadoop Q&amp;A, next <strong>Wednesday, December 17th starting at 11:30am PST, 2:30pm EST</strong>. Use the <strong>hashtag #DrillQA</strong> so the panelists can engage with your questions and comments.</p>
 
 <p>Apache Drill committers Tomer Shiran, Jacques Nadeau, and Ted Dunning, as well as Tableau Product Manager Jeff Feng and Data Scientist Dr. Kirk Borne will be on hand to answer your questions.</p>
 
 <h2 id="tomer-shiran-apache-drill-founder-tshiran">Tomer Shiran, Apache Drill Founder (@tshiran)</h2>
-
 <p>Tomer Shiran is the founder of Apache Drill, and a PMC member and committer on the project. He is VP Product Management at MapR, responsible for product strategy, roadmap and new feature development. Prior to MapR, Tomer held numerous product management and engineering roles at Microsoft, most recently as the product manager for Microsoft Internet Security &amp; Acceleration Server (now Microsoft Forefront). He is the founder of two websites that have served tens of millions of users, [...]
 
 <h2 id="jeff-feng-product-manager-tableau-software-jtfeng">Jeff Feng, Product Manager Tableau Software (@jtfeng)</h2>
-
 <p>Jeff Feng is a Product Manager at Tableau and leads their Big Data product roadmap &amp; strategic vision.  In his role, he focuses on joint technology integration and partnership efforts with a number of Hadoop, NoSQL and web application partners in helping users see and understand their data.</p>
 
 <h2 id="ted-dunning-apache-drill-comitter-ted_dunning">Ted Dunning, Apache Drill Comitter (@Ted_Dunning)</h2>
-
 <p>Ted Dunning is Chief Applications Architect at MapR Technologies and committer and PMC member of the Apache Mahout, Apache ZooKeeper, and Apache Drill projects and mentor for Apache Storm. He contributed to Mahout clustering, classification and matrix decomposition algorithms  and helped expand the new version of Mahout Math library. Ted was the chief architect behind the MusicMatch (now Yahoo Music) and Veoh recommendation systems, he built fraud detection systems for ID Analytics (L [...]
 
 <h2 id="jacques-nadeau-vice-president-apache-drill-intjesus">Jacques Nadeau, Vice President, Apache Drill (@intjesus)</h2>
-
 <p>Jacques Nadeau leads Apache Drill development efforts at MapR Technologies. He is an industry veteran with over 15 years of big data and analytics experience. Most recently, he was cofounder and CTO of search engine startup YapMap. Before that, he was director of new product engineering with Quigo (contextual advertising, acquired by AOL in 2007). He also built the Avenue A | Razorfish analytics data warehousing system and associated services practice (acquired by Microsoft).</p>
 
 <h2 id="dr-kirk-borne-george-mason-university-kirkdborne">Dr. Kirk Borne, George Mason University (@KirkDBorne)</h2>
-
-<p>Dr. Kirk Borne is a Transdisciplinary Data Scientist and an Astrophysicist. He is Professor of Astrophysics and Computational Science in the George Mason University School of Physics, Astronomy, and Computational Sciences. He has been at Mason since 2003, where he teaches and advises students in the graduate and undergraduate Computational Science, Informatics, and Data Science programs. Previously, he spent nearly 20 years in positions supporting NASA projects, including an assignmen [...]
+<p>Dr. Kirk Borne is a Transdisciplinary Data Scientist and an Astrophysicist. He is Professor of Astrophysics and Computational Science in the George Mason University School of Physics, Astronomy, and Computational Sciences. He has been at Mason since 2003, where he teaches and advises students in the graduate and undergraduate Computational Science, Informatics, and Data Science programs. Previously, he spent nearly 20 years in positions supporting NASA projects, including an assignmen [...]
 
   </article>
  <div id="disqus_thread"></div>
@@ -205,7 +200,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2014/12/16/whats-coming-in-2015/index.html b/blog/2014/12/16/whats-coming-in-2015/index.html
index fc81ea5..f13851a 100644
--- a/blog/2014/12/16/whats-coming-in-2015/index.html
+++ b/blog/2014/12/16/whats-coming-in-2015/index.html
@@ -137,26 +137,26 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>2014 was an exciting year for the Drill community. In August we made Drill available for downloads, and last week the Apache Software Foundation promoted Drill to a top-level project. Many of you have asked me what&#39;s coming next, so I decided to sit down and outline some of the interesting initiatives that the Drill community is currently working on:</p>
+    <p>2014 was an exciting year for the Drill community. In August we made Drill available for downloads, and last week the Apache Software Foundation promoted Drill to a top-level project. Many of you have asked me what’s coming next, so I decided to sit down and outline some of the interesting initiatives that the Drill community is currently working on:</p>
 
 <ul>
-<li>Flexible Access Control</li>
-<li>JSON in Any Shape or Form</li>
-<li>Advanced SQL</li>
-<li>New Data Sources</li>
-<li>Drill/Spark Integration</li>
-<li>Operational Enhancements: Speed, Scalability and Workload Management</li>
+  <li>Flexible Access Control</li>
+  <li>JSON in Any Shape or Form</li>
+  <li>Advanced SQL</li>
+  <li>New Data Sources</li>
+  <li>Drill/Spark Integration</li>
+  <li>Operational Enhancements: Speed, Scalability and Workload Management</li>
 </ul>
 
-<p>This is by no means intended to be an exhaustive list of everything that will be added to Drill in 2015. With Drill&#39;s rapidly expanding community, I anticipate that you&#39;ll see a whole lot more.</p>
+<p>This is by no means intended to be an exhaustive list of everything that will be added to Drill in 2015. With Drill’s rapidly expanding community, I anticipate that you’ll see a whole lot more.</p>
 
 <h2 id="flexible-access-control">Flexible Access Control</h2>
 
-<p>Many organizations are now interested in providing Drill as a service to their users, supporting many users, groups and organizations with a single cluster. To do so, they need to be able to control who can access what data. Today&#39;s volume and variety of data requires a new approach to access control. For example, it is becoming impractical for organizations to manage a standalone, centralized repository of permissions for every column/row of every table. Drill&#39;s virtual datas [...]
+<p>Many organizations are now interested in providing Drill as a service to their users, supporting many users, groups and organizations with a single cluster. To do so, they need to be able to control who can access what data. Today’s volume and variety of data requires a new approach to access control. For example, it is becoming impractical for organizations to manage a standalone, centralized repository of permissions for every column/row of every table. Drill’s virtual datasets (vie [...]
 
 <ul>
-<li>The user creates a virtual dataset (<code>CREATE VIEW vd AS SELECT ...</code>), selecting the data to be exposed/shared. The virtual dataset is defined as a SQL statement. For example, a virtual dataset may represent only the records that were created in the last 30 days and don&#39;t have the <code>restricted</code> flag. It could even mask some columns. Drill&#39;s virtual datasets (just the SQL statement) are stored as files in the file system, so users can leverage file system pe [...]
-<li>A virtual dataset is owned by a specific user and can only &quot;select&quot; data that the owner has access to. The data sources (HDFS, HBase, MongoDB, etc.) are responsible for access control decisions. Users and administrators do not need to define separate permissions inside Drill or utilize yet another centralized permission repository, such as Sentry and Ranger.</li>
+  <li>The user creates a virtual dataset (<code class="language-plaintext highlighter-rouge">CREATE VIEW vd AS SELECT ...</code>), selecting the data to be exposed/shared. The virtual dataset is defined as a SQL statement. For example, a virtual dataset may represent only the records that were created in the last 30 days and don’t have the <code class="language-plaintext highlighter-rouge">restricted</code> flag. It could even mask some columns. Drill’s virtual datasets (just the SQL sta [...]
+  <li>A virtual dataset is owned by a specific user and can only “select” data that the owner has access to. The data sources (HDFS, HBase, MongoDB, etc.) are responsible for access control decisions. Users and administrators do not need to define separate permissions inside Drill or utilize yet another centralized permission repository, such as Sentry and Ranger.</li>
 </ul>
 
 <h2 id="json-in-any-shape-or-form">JSON in Any Shape or Form</h2>
@@ -164,28 +164,35 @@
 <p>When data is <strong>Big</strong> (as in Big Data), it is painful to copy and transform it. Users should be able to explore the raw data without (or at least prior to) transforming it into another format. Drill is designed to enable in-situ analytics. Just point it at a file or directory and run the queries.</p>
 
 <p>JSON has emerged as the most common self-describing format, and Drill is able to query JSON files out of the box. Drill currently assumes that the JSON documents (or records) are stored sequentially in a file:</p>
-<div class="highlight"><pre><code class="language-json" data-lang="json"><span class="p">{</span> <span class="nt">&quot;name&quot;</span><span class="p">:</span> <span class="s2">&quot;Lee&quot;</span><span class="p">,</span> <span class="nt">&quot;yelping_since&quot;</span><span class="p">:</span> <span class="s2">&quot;2012-02&quot;</span> <span class="p">}</span>
-<span class="p">{</span> <span class="nt">&quot;name&quot;</span><span class="p">:</span> <span class="s2">&quot;Matthew&quot;</span><span class="p">,</span> <span class="nt">&quot;yelping_since&quot;</span><span class="p">:</span> <span class="s2">&quot;2011-12&quot;</span> <span class="p">}</span>
-<span class="p">{</span> <span class="nt">&quot;name&quot;</span><span class="p">:</span> <span class="s2">&quot;Jasmine&quot;</span><span class="p">,</span> <span class="nt">&quot;yelping_since&quot;</span><span class="p">:</span> <span class="s2">&quot;2010-09&quot;</span> <span class="p">}</span>
-</code></pre></div>
-<p>However, many JSON-based datasets, ranging from <a href="http://data.gov">data.gov</a> (government) datasets to Twitter API responses, are not organized as simple sequences of JSON documents. In some cases the actual records are listed as elements of an internal array inside a single JSON document. For example, consider the following file, which technically consists of a single JSON document, but really contains three records (under the <code>data.records</code> field):</p>
-<div class="highlight"><pre><code class="language-json" data-lang="json"><span class="p">{</span>
-  <span class="nt">&quot;metadata&quot;</span><span class="p">:</span> <span class="err">...</span><span class="p">,</span>
-  <span class="nt">&quot;data&quot;</span><span class="p">:</span> <span class="p">{</span>
-    <span class="nt">&quot;records&quot;</span><span class="p">:</span> <span class="p">[</span>
-      <span class="p">{</span> <span class="nt">&quot;name&quot;</span><span class="p">:</span> <span class="s2">&quot;Lee&quot;</span><span class="p">,</span> <span class="nt">&quot;yelping_since&quot;</span><span class="p">:</span> <span class="s2">&quot;2012-02&quot;</span> <span class="p">},</span>
-      <span class="p">{</span> <span class="nt">&quot;name&quot;</span><span class="p">:</span> <span class="s2">&quot;Matthew&quot;</span><span class="p">,</span> <span class="nt">&quot;yelping_since&quot;</span><span class="p">:</span> <span class="s2">&quot;2011-12&quot;</span> <span class="p">},</span>
-      <span class="p">{</span> <span class="nt">&quot;name&quot;</span><span class="p">:</span> <span class="s2">&quot;Jasmine&quot;</span><span class="p">,</span> <span class="nt">&quot;yelping_since&quot;</span><span class="p">:</span> <span class="s2">&quot;2010-09&quot;</span> <span class="p">}</span>
-    <span class="p">]</span>
-  <span class="p">}</span>
-<span class="p">}</span>
-</code></pre></div>
-<p>The <code>FLATTEN</code> function in Drill 0.7+ takes an array and converts each item into a top-level record:</p>
-<div class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">SELECT</span> <span class="n">FLATTEN</span><span class="p">(</span><span class="k">data</span><span class="p">.</span><span class="n">records</span><span class="p">)</span> <span class="k">FROM</span> <span class="n">dfs</span><span class="p">.</span><span class="n">tmp</span><span class="p">.</span><span class="o">`</span><span class="n">foo</span><span class="p">.</span><span class="n">json</span><s [...]
-</code></pre></div>
+
+<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span><span class="w"> </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Lee"</span><span class="p">,</span><span class="w"> </span><span class="nl">"yelping_since"</span><span class="p">:</span><span class="w"> </span><span class="s2">"2012-02"</span><span class="w"> </span><span class="p">}</span><span class="w">
+</span><span class="p">{</span><span class="w"> </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Matthew"</span><span class="p">,</span><span class="w"> </span><span class="nl">"yelping_since"</span><span class="p">:</span><span class="w"> </span><span class="s2">"2011-12"</span><span class="w"> </span><span class="p">}</span><span class="w">
+</span><span class="p">{</span><span class="w"> </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Jasmine"</span><span class="p">,</span><span class="w"> </span><span class="nl">"yelping_since"</span><span class="p">:</span><span class="w"> </span><span class="s2">"2010-09"</span><span class="w"> </span><span class="p">}</span><span class="w">
+</span></code></pre></div></div>
+
+<p>However, many JSON-based datasets, ranging from <a href="http://data.gov">data.gov</a> (government) datasets to Twitter API responses, are not organized as simple sequences of JSON documents. In some cases the actual records are listed as elements of an internal array inside a single JSON document. For example, consider the following file, which technically consists of a single JSON document, but really contains three records (under the <code class="language-plaintext highlighter-roug [...]
+
+<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span><span class="w">
+  </span><span class="nl">"metadata"</span><span class="p">:</span><span class="w"> </span><span class="err">...</span><span class="p">,</span><span class="w">
+  </span><span class="nl">"data"</span><span class="p">:</span><span class="w"> </span><span class="p">{</span><span class="w">
+    </span><span class="nl">"records"</span><span class="p">:</span><span class="w"> </span><span class="p">[</span><span class="w">
+      </span><span class="p">{</span><span class="w"> </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Lee"</span><span class="p">,</span><span class="w"> </span><span class="nl">"yelping_since"</span><span class="p">:</span><span class="w"> </span><span class="s2">"2012-02"</span><span class="w"> </span><span class="p">},</span><span class="w">
+      </span><span class="p">{</span><span class="w"> </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Matthew"</span><span class="p">,</span><span class="w"> </span><span class="nl">"yelping_since"</span><span class="p">:</span><span class="w"> </span><span class="s2">"2011-12"</span><span class="w"> </span><span class="p">},</span><span class="w">
+      </span><span class="p">{</span><span class="w"> </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Jasmine"</span><span class="p">,</span><span class="w"> </span><span class="nl">"yelping_since"</span><span class="p">:</span><span class="w"> </span><span class="s2">"2010-09"</span><span class="w"> </span><span class="p">}</span><span class="w">
+    </span><span class="p">]</span><span class="w">
+  </span><span class="p">}</span><span class="w">
+</span><span class="p">}</span><span class="w">
+</span></code></pre></div></div>
+
+<p>The <code class="language-plaintext highlighter-rouge">FLATTEN</code> function in Drill 0.7+ takes an array and converts each item into a top-level record:</p>
+
+<div class="language-sql highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">SELECT</span> <span class="n">FLATTEN</span><span class="p">(</span><span class="k">data</span><span class="p">.</span><span class="n">records</span><span class="p">)</span> <span class="k">FROM</span> <span class="n">dfs</span><span class="p">.</span><span class="n">tmp</span><span class="p">.</span><span class="nv">`foo.json`</span><span class="p">;</span>
+</code></pre></div></div>
+
 <p>You can use this as an inner query (or inside a view):</p>
-<div class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="o">&gt;</span> <span class="k">SELECT</span> <span class="n">t</span><span class="p">.</span><span class="n">record</span><span class="p">.</span><span class="n">name</span> <span class="k">AS</span> <span class="n">name</span>
-  <span class="k">FROM</span> <span class="p">(</span><span class="k">SELECT</span> <span class="n">FLATTEN</span><span class="p">(</span><span class="k">data</span><span class="p">.</span><span class="n">records</span><span class="p">)</span> <span class="k">AS</span> <span class="n">record</span> <span class="k">FROM</span> <span class="n">dfs</span><span class="p">.</span><span class="n">tmp</span><span class="p">.</span><span class="o">`</span><span class="n">test</span><span class=" [...]
+
+<div class="language-sql highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="o">&gt;</span> <span class="k">SELECT</span> <span class="n">t</span><span class="p">.</span><span class="n">record</span><span class="p">.</span><span class="n">name</span> <span class="k">AS</span> <span class="n">name</span>
+  <span class="k">FROM</span> <span class="p">(</span><span class="k">SELECT</span> <span class="n">FLATTEN</span><span class="p">(</span><span class="k">data</span><span class="p">.</span><span class="n">records</span><span class="p">)</span> <span class="k">AS</span> <span class="n">record</span> <span class="k">FROM</span> <span class="n">dfs</span><span class="p">.</span><span class="n">tmp</span><span class="p">.</span><span class="nv">`test/foo.json`</span><span class="p">)</span>  [...]
 <span class="o">+</span><span class="c1">------------+</span>
 <span class="o">|</span>    <span class="n">name</span>    <span class="o">|</span>
 <span class="o">+</span><span class="c1">------------+</span>
@@ -193,60 +200,66 @@
 <span class="o">|</span> <span class="n">Matthew</span>    <span class="o">|</span>
 <span class="o">|</span> <span class="n">Jasmine</span>    <span class="o">|</span>
 <span class="o">+</span><span class="c1">------------+</span>
-</code></pre></div>
-<p>While this works today, the dataset is technically a single JSON document, so Drill ends up reading the entire dataset into memory. We&#39;re developing a FLATTEN-pushdown mechanism that will enable the JSON reader to emit the individual records into the downstream operators, thereby making this work with datasets of arbitrary size. Once that&#39;s implemented, users will be able to explore any JSON-based dataset in-situ (ie, without having to transform it).</p>
+</code></pre></div></div>
+
+<p>While this works today, the dataset is technically a single JSON document, so Drill ends up reading the entire dataset into memory. We’re developing a FLATTEN-pushdown mechanism that will enable the JSON reader to emit the individual records into the downstream operators, thereby making this work with datasets of arbitrary size. Once that’s implemented, users will be able to explore any JSON-based dataset in-situ (ie, without having to transform it).</p>
 
 <h2 id="full-sql">Full SQL</h2>
 
-<p>Unlike the majority of SQL engines for Hadoop and NoSQL databases, which support SQL-like languages (HiveQL, CQL, etc.), Drill is designed from the ground up to be compliant with ANSI SQL. We simply started with a real SQL parser (Apache Calcite, previously known as Optiq). We&#39;re currently implementing the remaining SQL constructs, and plan to support the full TPC-DS suite (with no query modifications) in 2015. Full SQL support makes BI tools work better, and enables users who are [...]
+<p>Unlike the majority of SQL engines for Hadoop and NoSQL databases, which support SQL-like languages (HiveQL, CQL, etc.), Drill is designed from the ground up to be compliant with ANSI SQL. We simply started with a real SQL parser (Apache Calcite, previously known as Optiq). We’re currently implementing the remaining SQL constructs, and plan to support the full TPC-DS suite (with no query modifications) in 2015. Full SQL support makes BI tools work better, and enables users who are pro [...]
 
 <h2 id="new-data-sources">New Data Sources</h2>
 
 <p>Drill is a standalone, distributed SQL engine. It has a pluggable architecture that allows it to support multiple data sources. Drill 0.6 includes storage plugins for:</p>
 
 <ul>
-<li><a href="https://hadoop.apache.org/docs/current/api/org/apache/hadoop/fs/FileSystem.html">Hadoop File System</a> implementations (local file system, HDFS, MapR-FS, Amazon S3, etc.)</li>
-<li>HBase and MapR-DB</li>
-<li>MongoDB</li>
-<li>Hive Metastore (query any dataset that is registered in Hive Metastore)</li>
+  <li><a href="https://hadoop.apache.org/docs/current/api/org/apache/hadoop/fs/FileSystem.html">Hadoop File System</a> implementations (local file system, HDFS, MapR-FS, Amazon S3, etc.)</li>
+  <li>HBase and MapR-DB</li>
+  <li>MongoDB</li>
+  <li>Hive Metastore (query any dataset that is registered in Hive Metastore)</li>
 </ul>
 
 <p>A single query can join data from different systems. For example, a query can join user profiles in MongoDB with log files in Hadoop, or datasets in multiple Hadoop clusters.</p>
 
-<p>I&#39;m eager to see what storage plugins the community develops over the next 12 months. In the last few weeks alone, developers in the community have expressed their desire (on the <a href="mailto:dev@drill.apache.org">public list</a>) to develop additional storage plugins for the following data sources:</p>
+<p>I’m eager to see what storage plugins the community develops over the next 12 months. In the last few weeks alone, developers in the community have expressed their desire (on the <a href="mailto:dev@drill.apache.org">public list</a>) to develop additional storage plugins for the following data sources:</p>
 
 <ul>
-<li>Cassandra</li>
-<li>Solr</li>
-<li>JDBC (any RDBMS, including Oracle, MySQL, PostgreSQL and SQL Server)</li>
+  <li>Cassandra</li>
+  <li>Solr</li>
+  <li>JDBC (any RDBMS, including Oracle, MySQL, PostgreSQL and SQL Server)</li>
 </ul>
 
-<p>If you&#39;re interested in implementing a new storage plugin, I would encourage you to reach out to the Drill developer community on <a href="mailto:dev@drill.apache.org">dev@drill.apache.org</a>. I&#39;m looking forward to publishing an example of a single-query join across 10 data sources.</p>
+<p>If you’re interested in implementing a new storage plugin, I would encourage you to reach out to the Drill developer community on <a href="mailto:dev@drill.apache.org">dev@drill.apache.org</a>. I’m looking forward to publishing an example of a single-query join across 10 data sources.</p>
 
-<h2 id="drill-spark-integration">Drill/Spark Integration</h2>
+<h2 id="drillspark-integration">Drill/Spark Integration</h2>
 
-<p>We&#39;re seeing growing interest in Spark as an execution engine for data pipelines, providing an alternative to MapReduce. The Drill community is working on integrating Drill and Spark to address a few new use cases:</p>
+<p>We’re seeing growing interest in Spark as an execution engine for data pipelines, providing an alternative to MapReduce. The Drill community is working on integrating Drill and Spark to address a few new use cases:</p>
 
 <ul>
-<li><p>Use a Drill query (or view) as the input to Spark. Drill is a powerful engine for extracting and pre-processing data from various data sources, thereby reducing development time and effort. Here&#39;s an example:</p>
-<div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">val</span> <span class="n">sc</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">)</span>
-<span class="k">val</span> <span class="n">result</span> <span class="k">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">drillRDD</span><span class="o">(</span><span class="s">&quot;SELECT * FROM dfs.root.`path/to/logs` l, mongo.mydb.users u WHERE l.user_id = u.id GROUP BY ...&quot;</span><span class="o">)</span>
-<span class="k">val</span> <span class="n">formatted</span> <span class="k">=</span> <span class="n">result</span><span class="o">.</span><span class="n">map</span> <span class="o">{</span> <span class="n">r</span> <span class="k">=&gt;</span>
-  <span class="k">val</span> <span class="o">(</span><span class="n">first</span><span class="o">,</span> <span class="n">last</span><span class="o">,</span> <span class="n">visits</span><span class="o">)</span> <span class="k">=</span> <span class="o">(</span><span class="n">r</span><span class="o">.</span><span class="n">name</span><span class="o">.</span><span class="n">first</span><span class="o">,</span> <span class="n">r</span><span class="o">.</span><span class="n">name</span><spa [...]
-  <span class="n">s</span><span class="s">&quot;$first $last $visits&quot;</span>
-<span class="o">}</span>
-</code></pre></div></li>
-<li><p>Use Drill to query Spark RDDs. Analysts will be able to use BI tools like MicroStrategy, Spotfire and Tableau to query in-memory data in Spark. In addition, Spark developers will be able to embed Drill execution in a Spark data pipeline, thereby enjoying the power of Drill&#39;s schema-free, columnar execution engine.</p></li>
+  <li>
+    <p>Use a Drill query (or view) as the input to Spark. Drill is a powerful engine for extracting and pre-processing data from various data sources, thereby reducing development time and effort. Here’s an example:</p>
+
+    <div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>  <span class="k">val</span> <span class="nv">sc</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkContext</span><span class="o">(</span><span class="n">conf</span><span class="o">)</span>
+  <span class="k">val</span> <span class="nv">result</span> <span class="k">=</span> <span class="nv">sc</span><span class="o">.</span><span class="py">drillRDD</span><span class="o">(</span><span class="s">"SELECT * FROM dfs.root.`path/to/logs` l, mongo.mydb.users u WHERE l.user_id = u.id GROUP BY ..."</span><span class="o">)</span>
+  <span class="k">val</span> <span class="nv">formatted</span> <span class="k">=</span> <span class="nv">result</span><span class="o">.</span><span class="py">map</span> <span class="o">{</span> <span class="n">r</span> <span class="k">=&gt;</span>
+    <span class="nf">val</span> <span class="o">(</span><span class="n">first</span><span class="o">,</span> <span class="n">last</span><span class="o">,</span> <span class="n">visits</span><span class="o">)</span> <span class="k">=</span> <span class="o">(</span><span class="nv">r</span><span class="o">.</span><span class="py">name</span><span class="o">.</span><span class="py">first</span><span class="o">,</span> <span class="nv">r</span><span class="o">.</span><span class="py">name</s [...]
+    <span class="n">s</span><span class="s">"$first $last $visits"</span>
+  <span class="o">}</span>
+</code></pre></div>    </div>
+  </li>
+  <li>
+    <p>Use Drill to query Spark RDDs. Analysts will be able to use BI tools like MicroStrategy, Spotfire and Tableau to query in-memory data in Spark. In addition, Spark developers will be able to embed Drill execution in a Spark data pipeline, thereby enjoying the power of Drill’s schema-free, columnar execution engine.</p>
+  </li>
 </ul>
 
 <h2 id="operational-enhancements">Operational Enhancements</h2>
 
-<p>As we continue with our monthly releases and march towards the 1.0 release early next year, we&#39;re focused on improving Drill&#39;s speed and scalability. We&#39;ll also enhance Drill&#39;s multi-tenancy with more advanced workload management.</p>
+<p>As we continue with our monthly releases and march towards the 1.0 release early next year, we’re focused on improving Drill’s speed and scalability. We’ll also enhance Drill’s multi-tenancy with more advanced workload management.</p>
 
 <ul>
-<li><strong>Speed</strong>: Drill is already extremely fast, and we&#39;re going to make it even faster over the next few months. With that said, we think that improving user productivity and time-to-insight is as important as shaving a few milliseconds off a query&#39;s runtime.</li>
-<li><strong>Scalability</strong>: To date we&#39;ve focused mainly on clusters of up to a couple hundred nodes. We&#39;re currently working to support clusters with thousands of nodes. We&#39;re also improving concurrency to better support deployments in which hundreds of analysts or developers are running queries at the same time.</li>
-<li><strong>Workload management</strong>: A single cluster is often shared among many users and groups, and everyone expects answers in real-time. Workload management prioritizes the allocation of resources to ensure that the most important workloads get done first so that business demands can be met. Administrators need to be able to assign priorities and quotas at a fine granularity. We&#39;re working on enhancing Drill&#39;s workload management to provide these capabilities while prov [...]
+  <li><strong>Speed</strong>: Drill is already extremely fast, and we’re going to make it even faster over the next few months. With that said, we think that improving user productivity and time-to-insight is as important as shaving a few milliseconds off a query’s runtime.</li>
+  <li><strong>Scalability</strong>: To date we’ve focused mainly on clusters of up to a couple hundred nodes. We’re currently working to support clusters with thousands of nodes. We’re also improving concurrency to better support deployments in which hundreds of analysts or developers are running queries at the same time.</li>
+  <li><strong>Workload management</strong>: A single cluster is often shared among many users and groups, and everyone expects answers in real-time. Workload management prioritizes the allocation of resources to ensure that the most important workloads get done first so that business demands can be met. Administrators need to be able to assign priorities and quotas at a fine granularity. We’re working on enhancing Drill’s workload management to provide these capabilities while providing  [...]
 </ul>
 
 <h2 id="we-would-love-to-hear-from-you">We Would Love to Hear From You!</h2>
@@ -254,12 +267,12 @@
 <p>Are there other features you would like to see in Drill? We would love to hear from you:</p>
 
 <ul>
-<li>Drill users: <a href="mailto:user@drill.apache.org">user@drill.apache.org</a></li>
-<li>Drill developers: <a href="mailto:dev@drill.apache.org">dev@drill.apache.org</a></li>
-<li>Me: <a href="mailto:tshiran@apache.org">tshiran@apache.org</a></li>
+  <li>Drill users: <a href="mailto:user@drill.apache.org">user@drill.apache.org</a></li>
+  <li>Drill developers: <a href="mailto:dev@drill.apache.org">dev@drill.apache.org</a></li>
+  <li>Me: <a href="mailto:tshiran@apache.org">tshiran@apache.org</a></li>
 </ul>
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Tomer Shiran</p>
 
   </article>
@@ -284,7 +297,7 @@ Tomer Shiran</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2014/12/23/drill-0.7-released/index.html b/blog/2014/12/23/drill-0.7-released/index.html
index 2e9b135..1d7b080 100644
--- a/blog/2014/12/23/drill-0.7-released/index.html
+++ b/blog/2014/12/23/drill-0.7-released/index.html
@@ -137,22 +137,22 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>I&#39;m excited to announce that the community has just released Drill 0.7, which includes <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12327473">228 resolved JIRAs</a> and numerous enhancements such as: </p>
+    <p>I’m excited to announce that the community has just released Drill 0.7, which includes <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12327473">228 resolved JIRAs</a> and numerous enhancements such as:</p>
 
 <ul>
-<li>No dependency on UDP multicast. Drill can now work on EC2, as well as clusters with multiple subnets or multihomed configurations</li>
-<li><a href="https://cwiki.apache.org/confluence/display/DRILL/Partition+Pruning">Automatic partition pruning</a> based on directory structures</li>
-<li>New nested data functions: <a href="https://cwiki.apache.org/confluence/display/DRILL/KVGEN+Function">KVGEN</a> and <a href="https://cwiki.apache.org/confluence/display/DRILL/FLATTEN+Function">FLATTEN</a></li>
-<li>Fast &quot;schema&quot; return. This provides a better experience when using BI tools</li>
-<li>Hive 0.13 Metastore support</li>
-<li>Improved performance for queries on JSON data</li>
+  <li>No dependency on UDP multicast. Drill can now work on EC2, as well as clusters with multiple subnets or multihomed configurations</li>
+  <li><a href="https://cwiki.apache.org/confluence/display/DRILL/Partition+Pruning">Automatic partition pruning</a> based on directory structures</li>
+  <li>New nested data functions: <a href="https://cwiki.apache.org/confluence/display/DRILL/KVGEN+Function">KVGEN</a> and <a href="https://cwiki.apache.org/confluence/display/DRILL/FLATTEN+Function">FLATTEN</a></li>
+  <li>Fast “schema” return. This provides a better experience when using BI tools</li>
+  <li>Hive 0.13 Metastore support</li>
+  <li>Improved performance for queries on JSON data</li>
 </ul>
 
 <p>You can now <a href="/download/">download Drill 0.7</a>. As always, you may check out the official <a href="https://cwiki.apache.org/confluence/display/DRILL/Release+Notes">release notes</a> for more details.</p>
 
-<p>In case you&#39;re interested in understanding more about where we&#39;re heading, check out Tomer&#39;s recent blog post outlining some of the <a href="/blog/2014/12/16/whats-coming-in-2015/">planned initiatives for 2015</a>.</p>
+<p>In case you’re interested in understanding more about where we’re heading, check out Tomer’s recent blog post outlining some of the <a href="/blog/2014/12/16/whats-coming-in-2015/">planned initiatives for 2015</a>.</p>
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Jacques Nadeau</p>
 
   </article>
@@ -177,7 +177,7 @@ Jacques Nadeau</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/01/27/schema-free-json-data-infrastructure/index.html b/blog/2015/01/27/schema-free-json-data-infrastructure/index.html
index 0e61f2c..31873cf 100644
--- a/blog/2015/01/27/schema-free-json-data-infrastructure/index.html
+++ b/blog/2015/01/27/schema-free-json-data-infrastructure/index.html
@@ -137,18 +137,18 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>JSON has emerged in recent years as the de-facto standard data exchange format. It is being used everywhere. Front-end Web applications use JSON to maintain data and communicate with back-end applications. Web APIs are JSON-based (eg, <a href="https://dev.twitter.com/rest/public">Twitter REST APIs</a>, <a href="http://developers.marketo.com/documentation/rest/">Marketo REST APIs</a>, <a href="https://developer.github.com/v3/">GitHub API</a>). It&#39;s the format of choice for publ [...]
+    <p>JSON has emerged in recent years as the de-facto standard data exchange format. It is being used everywhere. Front-end Web applications use JSON to maintain data and communicate with back-end applications. Web APIs are JSON-based (eg, <a href="https://dev.twitter.com/rest/public">Twitter REST APIs</a>, <a href="http://developers.marketo.com/documentation/rest/">Marketo REST APIs</a>, <a href="https://developer.github.com/v3/">GitHub API</a>). It’s the format of choice for public d [...]
 
 <h1 id="why-is-json-a-convenient-data-exchange-format">Why is JSON a Convenient Data Exchange Format?</h1>
 
-<p>While I won&#39;t dive into the historical roots of JSON (JavaScript Object Notation, <a href="http://en.wikipedia.org/wiki/JSON#JavaScript_eval.28.29"><code>eval()</code></a>, etc.), I do want to highlight several attributes of JSON that make it a convenient data exchange format:</p>
+<p>While I won’t dive into the historical roots of JSON (JavaScript Object Notation, <a href="http://en.wikipedia.org/wiki/JSON#JavaScript_eval.28.29"><code class="language-plaintext highlighter-rouge">eval()</code></a>, etc.), I do want to highlight several attributes of JSON that make it a convenient data exchange format:</p>
 
 <ul>
-<li><strong>JSON is self-describing</strong>. You can look at a JSON document and understand what it represents. The field names are included in the document. You don&#39;t need an external schema or definition to interpret JSON-encoded data. This makes life easier for anyone who wants to deal with the data, and it also means that a collection of JSON documents represents what many people call a &quot;schema-less dataset&quot; (where structure can evolve, and different records can have d [...]
-<li><strong>JSON is simple</strong>. Other self-describing formats such as XML are much more complicated. A JSON document is made up of arrays and maps (or objects, in JSON terminology), and that&#39;s about it.</li>
-<li><strong>JSON can naturally represent real-world objects</strong>. Try representing your application&#39;s <code>Customer</code> object (with the person&#39;s address, order history, etc.) in a CSV file or a relational database. It&#39;s hard. In fact, ORM systems were invented to help alleviate this issue.</li>
-<li><strong>JSON libraries are available in virtually every programming language</strong>. Take a look at <a href="http://www.json.org/">the list of supported languages on JSON.org</a>. I counted 15 languages that start with the letters A, B or C.</li>
-<li><strong>JSON is idiomatic in loosely typed languages</strong>. Many loosely typed languages, such as Python, Ruby and JavaScript, have data structures that are similar to JSON objects, making it very natural to handle JSON data in those languages. For example, a Python dictionary looks just like a JSON object. This makes it easy for developers to utilize JSON in their applications.</li>
+  <li><strong>JSON is self-describing</strong>. You can look at a JSON document and understand what it represents. The field names are included in the document. You don’t need an external schema or definition to interpret JSON-encoded data. This makes life easier for anyone who wants to deal with the data, and it also means that a collection of JSON documents represents what many people call a “schema-less dataset” (where structure can evolve, and different records can have different fie [...]
+  <li><strong>JSON is simple</strong>. Other self-describing formats such as XML are much more complicated. A JSON document is made up of arrays and maps (or objects, in JSON terminology), and that’s about it.</li>
+  <li><strong>JSON can naturally represent real-world objects</strong>. Try representing your application’s <code class="language-plaintext highlighter-rouge">Customer</code> object (with the person’s address, order history, etc.) in a CSV file or a relational database. It’s hard. In fact, ORM systems were invented to help alleviate this issue.</li>
+  <li><strong>JSON libraries are available in virtually every programming language</strong>. Take a look at <a href="http://www.json.org/">the list of supported languages on JSON.org</a>. I counted 15 languages that start with the letters A, B or C.</li>
+  <li><strong>JSON is idiomatic in loosely typed languages</strong>. Many loosely typed languages, such as Python, Ruby and JavaScript, have data structures that are similar to JSON objects, making it very natural to handle JSON data in those languages. For example, a Python dictionary looks just like a JSON object. This makes it easy for developers to utilize JSON in their applications.</li>
 </ul>
 
 <h1 id="json-data-infrastructure">JSON Data Infrastructure</h1>
@@ -158,38 +158,41 @@
 <p>However, a new class of data infrastructure is providing a much more seamless experience via a full-fledged JSON data model. For example:</p>
 
 <ul>
-<li>Drill is a SQL engine in which each record is conceptually a JSON document.</li>
-<li>Elasticsearch is a search engine in which each indexed document is conceptually a JSON document.</li>
-<li>MongoDB is an operational database in which each record is conceptually a JSON document.</li>
+  <li>Drill is a SQL engine in which each record is conceptually a JSON document.</li>
+  <li>Elasticsearch is a search engine in which each indexed document is conceptually a JSON document.</li>
+  <li>MongoDB is an operational database in which each record is conceptually a JSON document.</li>
 </ul>
 
 <p>These systems view JSON as a data model as opposed to one of many data types, realizing that JSON offers a simple way to represent real-world objects.</p>
 
-<table><thead>
-<tr>
-<th></th>
-<th>Traditional Infrastructure</th>
-<th>JSON Infrastructure</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><strong>Examples:</strong></td>
-<td>Oracle, SQL Server</td>
-<td>Drill, Elasticsearch, MongoDB</td>
-</tr>
-<tr>
-<td><strong>Record:</strong></td>
-<td>Tuple</td>
-<td>JSON document</td>
-</tr>
-<tr>
-<td><strong>Variable schema:</strong></td>
-<td>No</td>
-<td>Yes</td>
-</tr>
-</tbody></table>
-
-<p>If you happen to be in the Bay Area tomorrow, please join Gaurav Gupta (VP Product Management, Elasticsearch), Paul Pedersen (Deputy CTO, MongoDB), Robert Greene (Senior Principal Product Manager, Oracle), Sukanta Ganguly (VP Solutions Architecture, Aerospike) and me for a panel moderated by Gartner&#39;s Nick Heudecker on this new world of schema-free JSON. Check out <a href="http://www.meetup.com/SF-Bay-Areas-Big-Data-Think-Tank/">The Hive Big Data Think Tank</a> for more information.</p>
+<table>
+  <thead>
+    <tr>
+      <th> </th>
+      <th>Traditional Infrastructure</th>
+      <th>JSON Infrastructure</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td><strong>Examples:</strong></td>
+      <td>Oracle, SQL Server</td>
+      <td>Drill, Elasticsearch, MongoDB</td>
+    </tr>
+    <tr>
+      <td><strong>Record:</strong></td>
+      <td>Tuple</td>
+      <td>JSON document</td>
+    </tr>
+    <tr>
+      <td><strong>Variable schema:</strong></td>
+      <td>No</td>
+      <td>Yes</td>
+    </tr>
+  </tbody>
+</table>
+
+<p>If you happen to be in the Bay Area tomorrow, please join Gaurav Gupta (VP Product Management, Elasticsearch), Paul Pedersen (Deputy CTO, MongoDB), Robert Greene (Senior Principal Product Manager, Oracle), Sukanta Ganguly (VP Solutions Architecture, Aerospike) and me for a panel moderated by Gartner’s Nick Heudecker on this new world of schema-free JSON. Check out <a href="http://www.meetup.com/SF-Bay-Areas-Big-Data-Think-Tank/">The Hive Big Data Think Tank</a> for more information.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -213,7 +216,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/03/23/microstrategy-announces-drill-support/index.html b/blog/2015/03/23/microstrategy-announces-drill-support/index.html
index 5b9309b..5b89575 100644
--- a/blog/2015/03/23/microstrategy-announces-drill-support/index.html
+++ b/blog/2015/03/23/microstrategy-announces-drill-support/index.html
@@ -137,13 +137,13 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today MicroStrategy  <a href="http://ir.microstrategy.com/releasedetail.cfm?ReleaseID=902795">announced</a> that it has certified its platform with Apache Drill. According to MicroStrategy&#39;s CTO, Tim Lang, Drill reduces the time-to-value for MicroStrategy users, and enables them to leverage multi-structured data.</p>
+    <p>Today MicroStrategy  <a href="http://ir.microstrategy.com/releasedetail.cfm?ReleaseID=902795">announced</a> that it has certified its platform with Apache Drill. According to MicroStrategy’s CTO, Tim Lang, Drill reduces the time-to-value for MicroStrategy users, and enables them to leverage multi-structured data.</p>
 
-<p>Many early adopters of Drill have been interested in leveraging MicroStrategy&#39;s powerful BI platform. With it&#39;s first-class support for self-describing data and evolving structure, Drill enables MicroStrategy users to explore and analyze the data in Hadoop and NoSQL databases without the usual friction that comes with having to define and manage schemas.</p>
+<p>Many early adopters of Drill have been interested in leveraging MicroStrategy’s powerful BI platform. With it’s first-class support for self-describing data and evolving structure, Drill enables MicroStrategy users to explore and analyze the data in Hadoop and NoSQL databases without the usual friction that comes with having to define and manage schemas.</p>
 
-<p>If you would like to learn more about this integration, <a href="http://info.microstrategy.com/accessing-multi-structured-data-sources">sign up</a> for MicroStrategy&#39;s webinar next month, which includes a live demo of the integration with Drill.</p>
+<p>If you would like to learn more about this integration, <a href="http://info.microstrategy.com/accessing-multi-structured-data-sources">sign up</a> for MicroStrategy’s webinar next month, which includes a live demo of the integration with Drill.</p>
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Tomer Shiran</p>
 
   </article>
@@ -168,7 +168,7 @@ Tomer Shiran</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/03/31/drill-0.8-released/index.html b/blog/2015/03/31/drill-0.8-released/index.html
index 97396f5..0b92aa5 100644
--- a/blog/2015/03/31/drill-0.8-released/index.html
+++ b/blog/2015/03/31/drill-0.8-released/index.html
@@ -144,23 +144,23 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>We&#39;re excited to announce that the community has just released Drill 0.8, which includes <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12328812">243 resolved JIRAs</a> and numerous enhancements such as: </p>
+    <p>We’re excited to announce that the community has just released Drill 0.8, which includes <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12328812">243 resolved JIRAs</a> and numerous enhancements such as:</p>
 
 <ul>
-<li><strong>Bytecode rewriting</strong>. Drill now leverages code optimization techniques such as bytecode rewriting and inlining to enhance the speed of many queries by reducing overall memory usage and CPU instructions.</li>
-<li><strong>Advanced partition pruning</strong>. Drill can now prune partitions based on arbitrarily complex expressions. For example, specify <code>WHERE dir0 LIKE &#39;2015-%&#39;</code> and your query will look inside the directory &quot;2015-01&quot; but not inside &quot;2014-12&quot;.</li>
-<li><strong>Real-time query diagnostics</strong>. You can now see exactly what your queries are doing in real-time, making it easy to troubleshoot, optimize and manage execution.</li>
-<li><strong>Large records, large # of files</strong>. Drill was previously limited to small records of up to 128KB. It now supports records of any size. In addition, query performance has been improved when dealing with large numbers of files thanks to a variety of optimizations such as parallel metadata reads.</li>
-<li><strong>More SQL</strong>. Drill now features complete support for <code>UNION ALL</code> and <code>COUNT(DISTINCT)</code>. Drill 0.8 also includes new functions such as <code>unix_timestamp</code> and the window functions <code>sum</code>, <code>count</code> and <code>rank</code>. Note that these window functions should be considered beta.</li>
-<li><strong>Better compression</strong>. Drill can now query compressed JSON files. In addition, the user can control Parquet compression in CTAS (<code>CREATE TABLE AS</code>) statements.</li>
-<li><strong>Performance</strong>. Drill 0.8 includes broadcast joins, disk-based joins, parallel metadata reads and many other performance-related enhancements.</li>
-<li><strong>Reliability</strong>. Drill 0.8 includes a variety of fixes that improve the stability of the drillbit daemon, the sqlline shell and the ODBC and JDBC drivers.</li>
-<li><strong>HBase 0.98 support</strong>. You can now run SQL queries on any HBase 0.98 table.</li>
+  <li><strong>Bytecode rewriting</strong>. Drill now leverages code optimization techniques such as bytecode rewriting and inlining to enhance the speed of many queries by reducing overall memory usage and CPU instructions.</li>
+  <li><strong>Advanced partition pruning</strong>. Drill can now prune partitions based on arbitrarily complex expressions. For example, specify <code class="language-plaintext highlighter-rouge">WHERE dir0 LIKE '2015-%'</code> and your query will look inside the directory “2015-01” but not inside “2014-12”.</li>
+  <li><strong>Real-time query diagnostics</strong>. You can now see exactly what your queries are doing in real-time, making it easy to troubleshoot, optimize and manage execution.</li>
+  <li><strong>Large records, large # of files</strong>. Drill was previously limited to small records of up to 128KB. It now supports records of any size. In addition, query performance has been improved when dealing with large numbers of files thanks to a variety of optimizations such as parallel metadata reads.</li>
+  <li><strong>More SQL</strong>. Drill now features complete support for <code class="language-plaintext highlighter-rouge">UNION ALL</code> and <code class="language-plaintext highlighter-rouge">COUNT(DISTINCT)</code>. Drill 0.8 also includes new functions such as <code class="language-plaintext highlighter-rouge">unix_timestamp</code> and the window functions <code class="language-plaintext highlighter-rouge">sum</code>, <code class="language-plaintext highlighter-rouge">count</code> a [...]
+  <li><strong>Better compression</strong>. Drill can now query compressed JSON files. In addition, the user can control Parquet compression in CTAS (<code class="language-plaintext highlighter-rouge">CREATE TABLE AS</code>) statements.</li>
+  <li><strong>Performance</strong>. Drill 0.8 includes broadcast joins, disk-based joins, parallel metadata reads and many other performance-related enhancements.</li>
+  <li><strong>Reliability</strong>. Drill 0.8 includes a variety of fixes that improve the stability of the drillbit daemon, the sqlline shell and the ODBC and JDBC drivers.</li>
+  <li><strong>HBase 0.98 support</strong>. You can now run SQL queries on any HBase 0.98 table.</li>
 </ul>
 
 <p>You can now <a href="/download/">download Drill 0.8</a>. As always, you may check out the official <a href="https://cwiki.apache.org/confluence/display/DRILL/Release+Notes">release notes</a> for more details.</p>
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Tomer Shiran and Jacques Nadeau</p>
 
   </article>
@@ -185,7 +185,7 @@ Tomer Shiran and Jacques Nadeau</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/04/30/apache-parquet-graudates-to-a-top-level-project/index.html b/blog/2015/04/30/apache-parquet-graudates-to-a-top-level-project/index.html
index a4adc26..cdffd10 100644
--- a/blog/2015/04/30/apache-parquet-graudates-to-a-top-level-project/index.html
+++ b/blog/2015/04/30/apache-parquet-graudates-to-a-top-level-project/index.html
@@ -137,15 +137,15 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>It&#39;s an exciting day. Apache Parquet, the de-facto standard columnar format for Hadoop, has graduated to an Apache top-level project.</p>
+    <p>It’s an exciting day. Apache Parquet, the de-facto standard columnar format for Hadoop, has graduated to an Apache top-level project.</p>
 
-<p>The Drill project supports a variety of file formats, but Parquet is the highest performing format, and it&#39;s the one we recommend to anyone who wants to maximize the performance of their queries. We&#39;ve had the pleasure of working closely with the Parquet community for over two years, and it&#39;s exciting to see how much the project has evolved.</p>
+<p>The Drill project supports a variety of file formats, but Parquet is the highest performing format, and it’s the one we recommend to anyone who wants to maximize the performance of their queries. We’ve had the pleasure of working closely with the Parquet community for over two years, and it’s exciting to see how much the project has evolved.</p>
 
-<p>We&#39;ve made a number of contributions to the project, including support for self-describing data. We just implemented off-heap memory management for the Parquet readers and writers, which will improve Parquet&#39;s memory handling. (This enhancement will be available in Parquet 1.8.)</p>
+<p>We’ve made a number of contributions to the project, including support for self-describing data. We just implemented off-heap memory management for the Parquet readers and writers, which will improve Parquet’s memory handling. (This enhancement will be available in Parquet 1.8.)</p>
 
-<p>I wanted to congratulate Twitter&#39;s Julien Le Dem (<a href="https://twitter.com/j_">@j_</a>), VP of Apache Parquet, and the entire Parquet community on the graduation milestone. Oh, and how can I get a two-letter Twitter handle?</p>
+<p>I wanted to congratulate Twitter’s Julien Le Dem (<a href="https://twitter.com/j_">@j_</a>), VP of Apache Parquet, and the entire Parquet community on the graduation milestone. Oh, and how can I get a two-letter Twitter handle?</p>
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Tomer Shiran</p>
 
   </article>
@@ -170,7 +170,7 @@ Tomer Shiran</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/05/04/drill-0.9-released/index.html b/blog/2015/05/04/drill-0.9-released/index.html
index a6813bf..67727c2 100644
--- a/blog/2015/05/04/drill-0.9-released/index.html
+++ b/blog/2015/05/04/drill-0.9-released/index.html
@@ -144,30 +144,33 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>It has been about a month since the release of Drill 0.8, which included <a href="/blog/drill-0.8-released/">more than 240 improvements</a>. Today we&#39;re happy to announce the availability of Drill 0.9, providing additional enhancements and bug fixes. In fact, this release includes <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12328813">200 resolved JIRAs</a>. Some of the noteworthy features in Drill 0.9 are:</p>
+    <p>It has been about a month since the release of Drill 0.8, which included <a href="/blog/drill-0.8-released/">more than 240 improvements</a>. Today we’re happy to announce the availability of Drill 0.9, providing additional enhancements and bug fixes. In fact, this release includes <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12328813">200 resolved JIRAs</a>. Some of the noteworthy features in Drill 0.9 are:</p>
 
 <ul>
-<li><strong>Authentication</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2674">DRILL-2674</a>). Drill now supports username/password authentication through the Java and C++ clients, as well as JDBC and ODBC. On the server-side, Drill leverages Linux PAM to securely validate the credentials. Users can choose to use an external user directory such as Active Directory or LDAP. To enable authentication, set the <code>security.user.auth</code> option in <code>drill-override.c [...]
-<li><strong>Impersonation</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2363">DRILL-2363</a>). Queries now execute and access resources using the identity of the user who submitted the query. Previously, all queries would run as the same user (eg, <code>drill</code>). With the new impersonation capability, the query will fail if the submitting user does not have permission to read the requested file(s) in the distributed file system. To enable impersonation, set the <cod [...]
-<li><strong>Ownership chaining</strong>. Drill now allows views with different owners to be chained. This represents a very flexible access control solution. For example, an administrator with access to raw, sensitive data could create a view called <code>masked</code> which would expose only a subset of the data to other users. The administrator would enable users to read the <code>masked</code> view but not the raw data. Note that Drill provides an option <code>max_chained_user_hops</c [...]
-<li><strong>MongoDB authentication</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-1502">DRILL-1502</a>). Drill can now connect to a MongoDB cluster that requires authentication.</li>
-<li><strong>Extended JSON datatypes</strong>. Our friends at MongoDB invented <a href="http://docs.mongodb.org/manual/reference/mongodb-extended-json/">extended JSON</a> - a set of extensions to the JSON format for supporting additional data types. We decided to embrace extended JSON in Drill. For example, standard JSON doesn&#39;t have a time type, so a time could be represented as either a string or a number: <code>{&quot;foo&quot;: &quot;19:20:30.450Z&quot;}</code> is just a string. W [...]
-We now support a number of qualifiers including <code>$bin</code>, <code>$date</code>, <code>$time</code>, <code>$interval</code>, <code>$numberLong</code> and <code>$dateDay</code> (see <a href="https://github.com/apache/drill/blob/master/exec/java-exec/src/test/resources/vector/complex/extended.json">the example</a>). We&#39;re in the process of adding some additional qualifiers to make sure that all of MongoDB&#39;s extended types are supported (this is particularly important when que [...]
-<li><strong>Avro support</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-1512">DRILL-1512</a>). Drill can now read Avro files. This patch was contributed by Andrew Selden at Elastic.co (formerly known as Elasticsearch).</li>
-<li><strong>Improved error messages</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2675">DRILL-2675</a> and more). It can be challenging for a complex distributed system like Drill to translate low-level internal conditions into actionable messages to the user. This release includes several enhancements that enable Drill to accomplish just that in a variety of cases.</li>
-<li><strong>Parquet and Calcite enhancements</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-1410">DRILL-1410</a> and <a href="https://issues.apache.org/jira/browse/DRILL-1384">DRILL-1384</a>). Drill isn&#39;t a traditional query engine - it&#39;s the first analytical query engine with a JSON data model. This has required us to enhance Parquet (our columnar format) and Calcite (our SQL parser). These enhancements have now been contributed back to those projects, and Drill  [...]
-<li><p><strong>New sys tables for memory and thread information</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2275">DRILL-2275</a>). Drill includes two new <code>sys</code> tables that provide real-time metrics about memory utilization and threads on each of the nodes in the cluster. You can run a simple <code>SELECT *</code> to see what information is available:</p>
-<div class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">SELECT</span> <span class="o">*</span> <span class="k">FROM</span> <span class="n">sys</span><span class="p">.</span><span class="n">drillmemory</span><span class="p">;</span>
-<span class="k">SELECT</span> <span class="o">*</span> <span class="k">FROM</span> <span class="n">sys</span><span class="p">.</span><span class="n">drillbitthreads</span><span class="p">;</span>
-</code></pre></div></li>
-<li><p><strong>Support for very wide tables</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2739">DRILL-2739</a>). Drill previously had some issues with tables that had more than 4095 colums. This limitation has been addressed.</p></li>
+  <li><strong>Authentication</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2674">DRILL-2674</a>). Drill now supports username/password authentication through the Java and C++ clients, as well as JDBC and ODBC. On the server-side, Drill leverages Linux PAM to securely validate the credentials. Users can choose to use an external user directory such as Active Directory or LDAP. To enable authentication, set the <code class="language-plaintext highlighter-rouge">security.us [...]
+  <li><strong>Impersonation</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2363">DRILL-2363</a>). Queries now execute and access resources using the identity of the user who submitted the query. Previously, all queries would run as the same user (eg, <code class="language-plaintext highlighter-rouge">drill</code>). With the new impersonation capability, the query will fail if the submitting user does not have permission to read the requested file(s) in the distributed fil [...]
+  <li><strong>Ownership chaining</strong>. Drill now allows views with different owners to be chained. This represents a very flexible access control solution. For example, an administrator with access to raw, sensitive data could create a view called <code class="language-plaintext highlighter-rouge">masked</code> which would expose only a subset of the data to other users. The administrator would enable users to read the <code class="language-plaintext highlighter-rouge">masked</code>  [...]
+  <li><strong>MongoDB authentication</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-1502">DRILL-1502</a>). Drill can now connect to a MongoDB cluster that requires authentication.</li>
+  <li><strong>Extended JSON datatypes</strong>. Our friends at MongoDB invented <a href="http://docs.mongodb.org/manual/reference/mongodb-extended-json/">extended JSON</a> - a set of extensions to the JSON format for supporting additional data types. We decided to embrace extended JSON in Drill. For example, standard JSON doesn’t have a time type, so a time could be represented as either a string or a number: <code class="language-plaintext highlighter-rouge">{"foo": "19:20:30.450Z"}</co [...]
+We now support a number of qualifiers including <code class="language-plaintext highlighter-rouge">$bin</code>, <code class="language-plaintext highlighter-rouge">$date</code>, <code class="language-plaintext highlighter-rouge">$time</code>, <code class="language-plaintext highlighter-rouge">$interval</code>, <code class="language-plaintext highlighter-rouge">$numberLong</code> and <code class="language-plaintext highlighter-rouge">$dateDay</code> (see <a href="https://github.com/apache/ [...]
+  <li><strong>Avro support</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-1512">DRILL-1512</a>). Drill can now read Avro files. This patch was contributed by Andrew Selden at Elastic.co (formerly known as Elasticsearch).</li>
+  <li><strong>Improved error messages</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2675">DRILL-2675</a> and more). It can be challenging for a complex distributed system like Drill to translate low-level internal conditions into actionable messages to the user. This release includes several enhancements that enable Drill to accomplish just that in a variety of cases.</li>
+  <li><strong>Parquet and Calcite enhancements</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-1410">DRILL-1410</a> and <a href="https://issues.apache.org/jira/browse/DRILL-1384">DRILL-1384</a>). Drill isn’t a traditional query engine - it’s the first analytical query engine with a JSON data model. This has required us to enhance Parquet (our columnar format) and Calcite (our SQL parser). These enhancements have now been contributed back to those projects, and Drill is usi [...]
+  <li>
+    <p><strong>New sys tables for memory and thread information</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2275">DRILL-2275</a>). Drill includes two new <code class="language-plaintext highlighter-rouge">sys</code> tables that provide real-time metrics about memory utilization and threads on each of the nodes in the cluster. You can run a simple <code class="language-plaintext highlighter-rouge">SELECT *</code> to see what information is available:</p>
+
+    <div class="language-sql highlighter-rouge"><div class="highlight"><pre class="highlight"><code>  <span class="k">SELECT</span> <span class="o">*</span> <span class="k">FROM</span> <span class="n">sys</span><span class="p">.</span><span class="n">drillmemory</span><span class="p">;</span>
+  <span class="k">SELECT</span> <span class="o">*</span> <span class="k">FROM</span> <span class="n">sys</span><span class="p">.</span><span class="n">drillbitthreads</span><span class="p">;</span>
+</code></pre></div>    </div>
+  </li>
+  <li><strong>Support for very wide tables</strong> (<a href="https://issues.apache.org/jira/browse/DRILL-2739">DRILL-2739</a>). Drill previously had some issues with tables that had more than 4095 colums. This limitation has been addressed.</li>
 </ul>
 
 <p>You can now <a href="/download/">download Drill 0.9</a>. As always, you can check out the official <a href="/docs/release-notes/">release notes</a> for more details.</p>
 
-<p>We&#39;re gearing up for Drill&#39;s 1.0 release later this month. Stay tuned!</p>
+<p>We’re gearing up for Drill’s 1.0 release later this month. Stay tuned!</p>
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Tomer Shiran and Jacques Nadeau</p>
 
   </article>
@@ -192,7 +195,7 @@ Tomer Shiran and Jacques Nadeau</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/05/19/drill-1.0-released/index.html b/blog/2015/05/19/drill-1.0-released/index.html
index 6e476cd..a67a0db 100644
--- a/blog/2015/05/19/drill-1.0-released/index.html
+++ b/blog/2015/05/19/drill-1.0-released/index.html
@@ -147,32 +147,32 @@
     <p>We embarked on the Drill project in late 2012 with two primary objectives:</p>
 
 <ul>
-<li>Enable agility by getting rid of all the traditional overhead - namely, the need to load data, create and maintain schemas, transform data, etc. We wanted to develop a system that would support the speed and agility at which modern organizations want (or need) to operate in this era.</li>
-<li>Unlock the data housed in non-relational datastores like NoSQL, Hadoop and cloud storage, making it available not only to developers, but also business users, analysts, data scientists and anyone else who can write a SQL query or use a BI tool. Non-relational datastores are capturing an increasing share of the world&#39;s data, and it&#39;s incredibly hard to explore and analyze this data.</li>
+  <li>Enable agility by getting rid of all the traditional overhead - namely, the need to load data, create and maintain schemas, transform data, etc. We wanted to develop a system that would support the speed and agility at which modern organizations want (or need) to operate in this era.</li>
+  <li>Unlock the data housed in non-relational datastores like NoSQL, Hadoop and cloud storage, making it available not only to developers, but also business users, analysts, data scientists and anyone else who can write a SQL query or use a BI tool. Non-relational datastores are capturing an increasing share of the world’s data, and it’s incredibly hard to explore and analyze this data.</li>
 </ul>
 
-<p>Today we&#39;re happy to announce the availability of the production-ready Drill 1.0 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12325568">228 JIRAs</a> on top of the 0.9 release earlier this month. Highlights include:</p>
+<p>Today we’re happy to announce the availability of the production-ready Drill 1.0 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12325568">228 JIRAs</a> on top of the 0.9 release earlier this month. Highlights include:</p>
 
 <ul>
-<li>Substantial improvements in stability, memory handling and performance</li>
-<li>Improvements in Drill CLI experience with addition of convenience shortcuts and improved colors/alignment</li>
-<li>Substantial additions to documentation including coverage of troubleshooting, performance tuning and many additions to the SQL reference</li>
-<li>Enhancements in join planning to facilitate high speed planning of large and complicated joins</li>
-<li>Add support for new context functions including <code>CURRENT_USER</code> and <code>CURRENT_SCHEMA</code></li>
-<li>Ability to treat all numbers as approximate decimals when reading JSON</li>
-<li>Enhancements in Drill&#39;s text and CSV handling to support first row skipping, configurable field/line delimiters and configurable quoting</li>
-<li>Improved JDBC compatibility (and tracing proxy for easy debugging).</li>
-<li>Ability to do JDBC connections with direct urls (avoiding ZooKeeper)</li>
-<li>Automatic selection of spooling or back-pressure exchange semantics to avoid distributed deadlocks in complex sort-heavy queries</li>
-<li>Improvements in query profile reporting</li>
-<li>Addition of <code>ILIKE(VARCHAR, PATTERN)</code> and <code>SUBSTR(VARCHAR, REGEX)</code> functions</li>
+  <li>Substantial improvements in stability, memory handling and performance</li>
+  <li>Improvements in Drill CLI experience with addition of convenience shortcuts and improved colors/alignment</li>
+  <li>Substantial additions to documentation including coverage of troubleshooting, performance tuning and many additions to the SQL reference</li>
+  <li>Enhancements in join planning to facilitate high speed planning of large and complicated joins</li>
+  <li>Add support for new context functions including <code class="language-plaintext highlighter-rouge">CURRENT_USER</code> and <code class="language-plaintext highlighter-rouge">CURRENT_SCHEMA</code></li>
+  <li>Ability to treat all numbers as approximate decimals when reading JSON</li>
+  <li>Enhancements in Drill’s text and CSV handling to support first row skipping, configurable field/line delimiters and configurable quoting</li>
+  <li>Improved JDBC compatibility (and tracing proxy for easy debugging).</li>
+  <li>Ability to do JDBC connections with direct urls (avoiding ZooKeeper)</li>
+  <li>Automatic selection of spooling or back-pressure exchange semantics to avoid distributed deadlocks in complex sort-heavy queries</li>
+  <li>Improvements in query profile reporting</li>
+  <li>Addition of <code class="language-plaintext highlighter-rouge">ILIKE(VARCHAR, PATTERN)</code> and <code class="language-plaintext highlighter-rouge">SUBSTR(VARCHAR, REGEX)</code> functions</li>
 </ul>
 
-<p>We would not have been able to reach this milestone without the tremendous effort by all the <a href="/team/">committers</a> and contributors, and we would like to congratulate the entire community on achieving this milestone. While 1.0 is an exciting milestone, it&#39;s really just the beginning of the journey. We&#39;ll release 1.1 next month, and continue with our 4-6 week release cycle, so you can count on many additional enhancements over the coming months.</p>
+<p>We would not have been able to reach this milestone without the tremendous effort by all the <a href="/team/">committers</a> and contributors, and we would like to congratulate the entire community on achieving this milestone. While 1.0 is an exciting milestone, it’s really just the beginning of the journey. We’ll release 1.1 next month, and continue with our 4-6 week release cycle, so you can count on many additional enhancements over the coming months.</p>
 
-<p>Also be sure to check out the <a href="/blog/2015/05/19/the-apache-software-foundation-announces-apache-drill-1.0/">Apache Software Foundation&#39;s press release</a>.</p>
+<p>Also be sure to check out the <a href="/blog/2015/05/19/the-apache-software-foundation-announces-apache-drill-1.0/">Apache Software Foundation’s press release</a>.</p>
 
-<p>Happy Drilling!<br>
+<p>Happy Drilling!<br />
 Tomer Shiran and Jacques Nadeau</p>
 
   </article>
@@ -197,7 +197,7 @@ Tomer Shiran and Jacques Nadeau</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/05/19/the-apache-software-foundation-announces-apache-drill-1.0/index.html b/blog/2015/05/19/the-apache-software-foundation-announces-apache-drill-1.0/index.html
index aef1289..8e1168a 100644
--- a/blog/2015/05/19/the-apache-software-foundation-announces-apache-drill-1.0/index.html
+++ b/blog/2015/05/19/the-apache-software-foundation-announces-apache-drill-1.0/index.html
@@ -139,41 +139,41 @@
   <article class="post-content">
     <p><strong>Thousands of users adopt Open Source, enterprise-grade, schema-free SQL query engine for Apache Hadoop®, NoSQL and Cloud storage</strong></p>
 
-<p>Forest Hill, MD --19 May 2015-- The <a href="https://www.apache.org/">Apache Software Foundation (ASF)</a>, the all-volunteer developers, stewards, and incubators of more than 350 Open Source projects and initiatives, announced today the availability of Apache™ Drill™ 1.0, the schema-free SQL query engine for Apache Hadoop®, NoSQL and Cloud storage.</p>
+<p>Forest Hill, MD –19 May 2015– The <a href="https://www.apache.org/">Apache Software Foundation (ASF)</a>, the all-volunteer developers, stewards, and incubators of more than 350 Open Source projects and initiatives, announced today the availability of Apache™ Drill™ 1.0, the schema-free SQL query engine for Apache Hadoop®, NoSQL and Cloud storage.</p>
 
-<p>&quot;The production-ready 1.0 release represents a significant milestone for the Drill project,&quot; said Tomer Shiran, member of the Apache Drill Project Management Committee. &quot;It is the outcome of almost three years of development involving dozens of engineers from numerous companies. Apache Drill&#39;s flexibility and ease-of-use have attracted thousands of users, and the enterprise-grade reliability, security and performance in the 1.0 release will further accelerate adopti [...]
+<p>“The production-ready 1.0 release represents a significant milestone for the Drill project,” said Tomer Shiran, member of the Apache Drill Project Management Committee. “It is the outcome of almost three years of development involving dozens of engineers from numerous companies. Apache Drill’s flexibility and ease-of-use have attracted thousands of users, and the enterprise-grade reliability, security and performance in the 1.0 release will further accelerate adoption.”</p>
 
 <p>With the exponential growth of data in recent years, and the shift towards rapid application development, new data is increasingly being stored in non-relational, schema-free datastores including Hadoop, NoSQL and Cloud storage. Apache Drill revolutionizes data exploration and analytics by enabling analysts, business users, data scientists and developers to explore and analyze this data without sacrificing the flexibility and agility offered by these datastores. Drill processes the da [...]
 
-<p>&quot;Drill introduces the JSON document model to the world of SQL-based analytics and BI&quot; said Jacques Nadeau, Vice President of Apache Drill. &quot;This enables users to query fixed-schema, evolving-schema and schema-free data stored in a variety of formats and datastores. The architecture of relational query engines and databases is built on the assumption that all data has a simple and static structure that’s known in advance, and this 40-year-old assumption is simply no long [...]
+<p>“Drill introduces the JSON document model to the world of SQL-based analytics and BI” said Jacques Nadeau, Vice President of Apache Drill. “This enables users to query fixed-schema, evolving-schema and schema-free data stored in a variety of formats and datastores. The architecture of relational query engines and databases is built on the assumption that all data has a simple and static structure that’s known in advance, and this 40-year-old assumption is simply no longer valid. We de [...]
 
-<p>Apache Drill&#39;s architecture is unique in many ways. It is the only columnar execution engine that supports complex and schema-free data, and the only execution engine that performs data-driven query compilation (and re-compilation, also known as schema discovery) during query execution. These unique capabilities enable Drill to achieve record-breaking performance with the flexibility offered by the JSON document model.</p>
+<p>Apache Drill’s architecture is unique in many ways. It is the only columnar execution engine that supports complex and schema-free data, and the only execution engine that performs data-driven query compilation (and re-compilation, also known as schema discovery) during query execution. These unique capabilities enable Drill to achieve record-breaking performance with the flexibility offered by the JSON document model.</p>
 
 <p>The business intelligence (BI) partner ecosystem is embracing the power of Apache Drill. Organizations such as Information Builders, JReport (Jinfonet Software), MicroStrategy, Qlik®, Simba, Tableau, and TIBCO, are working closely with the Drill community to interoperate BI tools with Drill through standard ODBC and JDBC connectivity. This collaboration enables end users to explore data by leveraging sophisticated visualization tools and advanced analytics.</p>
 
-<p>&quot;We&#39;ve been using Apache Drill for the past six months,&quot; said Andrew Hamilton, CTO of Cardlytics. &quot;Its ease of deployment and use along with its ability to quickly process trillions of records has made it an invaluable tool inside Cardlytics. Queries that were previously insurmountable are now common occurrence. Congratulations to the Drill community on this momentous occasion.&quot;</p>
+<p>“We’ve been using Apache Drill for the past six months,” said Andrew Hamilton, CTO of Cardlytics. “Its ease of deployment and use along with its ability to quickly process trillions of records has made it an invaluable tool inside Cardlytics. Queries that were previously insurmountable are now common occurrence. Congratulations to the Drill community on this momentous occasion.”</p>
 
-<p>&quot;Drill&#39;s columnar execution engine and optimizer take full advantage of Apache Parquet&#39;s columnar storage to achieve maximum performance,&quot; said Julien Le Dem, Technical Lead of Analytics Data Pipeline at Twitter and Vice President of Apache Parquet. &quot;The Drill team has been a key contributor to the Parquet project, including recent enhancements to Parquet types and vectorization. The Drill team’s involvement in the Parquet community is instrumental in driving th [...]
+<p>“Drill’s columnar execution engine and optimizer take full advantage of Apache Parquet’s columnar storage to achieve maximum performance,” said Julien Le Dem, Technical Lead of Analytics Data Pipeline at Twitter and Vice President of Apache Parquet. “The Drill team has been a key contributor to the Parquet project, including recent enhancements to Parquet types and vectorization. The Drill team’s involvement in the Parquet community is instrumental in driving the standard.”</p>
 
-<p>&quot;Apache Drill 1.0 raises the bar for secure, reliable and scalable SQL-on-Hadoop,&quot; said Piyush Bhargava, distinguished engineer, IT, Cisco Systems. &quot;Because Drill integrates with existing data virtualization and visualization tools, we expect it will improve adoption of self-service data exploration and large-scale BI queries on our advanced Hadoop platform at Cisco.&quot;</p>
+<p>“Apache Drill 1.0 raises the bar for secure, reliable and scalable SQL-on-Hadoop,” said Piyush Bhargava, distinguished engineer, IT, Cisco Systems. “Because Drill integrates with existing data virtualization and visualization tools, we expect it will improve adoption of self-service data exploration and large-scale BI queries on our advanced Hadoop platform at Cisco.”</p>
 
-<p>&quot;MicroStrategy recognized early on the value of Apache Drill and is one of the first analytic platforms to certify Drill,&quot; said Tim Lang, senior executive vice president and chief technology officer at MicroStrategy Incorporated.  &quot;Because Drill is designed to be used with a minimal learning curve, it opens up more complex data sets to the end user who can immediately visualize and analyze new information using MicroStrategy’s advanced capabilities.&quot;</p>
+<p>“MicroStrategy recognized early on the value of Apache Drill and is one of the first analytic platforms to certify Drill,” said Tim Lang, senior executive vice president and chief technology officer at MicroStrategy Incorporated.  “Because Drill is designed to be used with a minimal learning curve, it opens up more complex data sets to the end user who can immediately visualize and analyze new information using MicroStrategy’s advanced capabilities.”</p>
 
-<p>&quot;Apache Drill closes a gap around self-service SQL queries in Hadoop, especially on complex, dynamic NoSQL data types,&quot; said Mike Foster, Strategic Alliances Technology Officer at Qlik.  &quot;Drill&#39;s performance advantages for Hadoop data access, combined with the Qlik associative experience, enables our customers to continue discovering business value from a wide range of data. Congratulations to the Apache Drill community.&quot;</p>
+<p>“Apache Drill closes a gap around self-service SQL queries in Hadoop, especially on complex, dynamic NoSQL data types,” said Mike Foster, Strategic Alliances Technology Officer at Qlik.  “Drill’s performance advantages for Hadoop data access, combined with the Qlik associative experience, enables our customers to continue discovering business value from a wide range of data. Congratulations to the Apache Drill community.”</p>
 
-<p>&quot;Apache Drill empowers people to access data that is traditionally difficult to work with,&quot; said Jeff Feng, product manager, Tableau.  &quot;Direct access within a centralized data repository and without pre-generating metadata definitions encourages data democracy which is essential for data-driven organizations. Additionally, Drill&#39;s instant and secure access to complex data formats, such as JSON, opens up extended analytical opportunities.&quot;</p>
+<p>“Apache Drill empowers people to access data that is traditionally difficult to work with,” said Jeff Feng, product manager, Tableau.  “Direct access within a centralized data repository and without pre-generating metadata definitions encourages data democracy which is essential for data-driven organizations. Additionally, Drill’s instant and secure access to complex data formats, such as JSON, opens up extended analytical opportunities.”</p>
 
-<p>&quot;Congratulations to the Apache Drill community on the availability of 1.0,&quot; said Karl Van den Bergh, Vice President, Products and Cloud at TIBCO. &quot;Drill promises to bring low-latency access to data stored in Hadoop and HBase via standard SQL semantics. This innovation is in line with the value of Fast Data analysis, which TIBCO customers welcome and appreciate.&quot;</p>
+<p>“Congratulations to the Apache Drill community on the availability of 1.0,” said Karl Van den Bergh, Vice President, Products and Cloud at TIBCO. “Drill promises to bring low-latency access to data stored in Hadoop and HBase via standard SQL semantics. This innovation is in line with the value of Fast Data analysis, which TIBCO customers welcome and appreciate.”</p>
 
-<p>&quot;The community&#39;s accomplishment is a testament to The Apache Software Foundation&#39;s ability to bring together diverse companies to work towards a common goal. None of this would have been possible without the contribution of engineers with advanced degrees and experience in relational databases, data warehousing, MPP, query optimization, Hadoop and NoSQL,&quot; added Nadeau. &quot;Our community&#39;s strength is what will solidify Apache Drill as a key data technology for  [...]
+<p>“The community’s accomplishment is a testament to The Apache Software Foundation’s ability to bring together diverse companies to work towards a common goal. None of this would have been possible without the contribution of engineers with advanced degrees and experience in relational databases, data warehousing, MPP, query optimization, Hadoop and NoSQL,” added Nadeau. “Our community’s strength is what will solidify Apache Drill as a key data technology for the next decade. We welcome [...]
 
 <p>Availability and Oversight
-Apache Drill 1.0 is available immediately as a free download from <a href="http://drill.apache.org/download/">http://drill.apache.org/download/</a>. Documentation is available at <a href="http://drill.apache.org/docs/">http://drill.apache.org/docs/</a>. As with all Apache products, Apache Drill software is released under the Apache License v2.0, and is overseen by a self-selected team of active contributors to the project. A Project Management Committee (PMC) guides the project&#39;s day [...]
+Apache Drill 1.0 is available immediately as a free download from http://drill.apache.org/download/. Documentation is available at http://drill.apache.org/docs/. As with all Apache products, Apache Drill software is released under the Apache License v2.0, and is overseen by a self-selected team of active contributors to the project. A Project Management Committee (PMC) guides the project’s day-to-day operations, including community development and product releases. For ways to become inv [...]
 
 <p>About The Apache Software Foundation (ASF)
-Established in 1999, the all-volunteer Foundation oversees more than 350 leading Open Source projects, including Apache HTTP Server --the world&#39;s most popular Web server software. Through the ASF&#39;s meritocratic process known as &quot;The Apache Way,&quot; more than 500 individual Members and 4,500 Committers successfully collaborate to develop freely available enterprise-grade software, benefiting millions of users worldwide: thousands of software solutions are distributed under  [...]
+Established in 1999, the all-volunteer Foundation oversees more than 350 leading Open Source projects, including Apache HTTP Server –the world’s most popular Web server software. Through the ASF’s meritocratic process known as “The Apache Way,” more than 500 individual Members and 4,500 Committers successfully collaborate to develop freely available enterprise-grade software, benefiting millions of users worldwide: thousands of software solutions are distributed under the Apache License; [...]
 
-<p>© The Apache Software Foundation. &quot;Apache&quot;, &quot;Apache Drill&quot;, &quot;Drill&quot;, &quot;Apache Hadoop&quot;, &quot;Hadoop&quot;, &quot;Apache Parquet&quot;, &quot;Parquet&quot;, and &quot;ApacheCon&quot;, are registered trademarks or trademarks of The Apache Software Foundation. All other brands and trademarks are the property of their respective owners.</p>
+<p>© The Apache Software Foundation. “Apache”, “Apache Drill”, “Drill”, “Apache Hadoop”, “Hadoop”, “Apache Parquet”, “Parquet”, and “ApacheCon”, are registered trademarks or trademarks of The Apache Software Foundation. All other brands and trademarks are the property of their respective owners.</p>
 
 <p># # #</p>
 
@@ -199,7 +199,7 @@ Established in 1999, the all-volunteer Foundation oversees more than 350 leading
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/07/05/drill-1.1-released/index.html b/blog/2015/07/05/drill-1.1-released/index.html
index a5c367c..121f6eb 100644
--- a/blog/2015/07/05/drill-1.1-released/index.html
+++ b/blog/2015/07/05/drill-1.1-released/index.html
@@ -137,69 +137,68 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today I&#39;m happy to announce the availability of the Drill 1.1 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12329689">162 JIRAs</a> on top of May&#39;s 1.0 release. Highlights include:</p>
+    <p>Today I’m happy to announce the availability of the Drill 1.1 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12329689">162 JIRAs</a> on top of May’s 1.0 release. Highlights include:</p>
 
 <h2 id="automatic-partitioning-for-parquet-files">Automatic Partitioning for Parquet Files</h2>
 
-<p>Drill now supports creating partitions automatically when using <code>CREATE TABLE AS</code> (CTAS) to generate Parquet files. By leveraging the unique capabilities of the Parquet file format, Drill is able provide this capability and still maintain data integrity and compatibility with all other systems that consume Parquet (a world&#39;s first). This functionality can be exercised by utilizing the new <a href="https://drill.apache.org/docs/partition-by-clause/"><code>PARTITION BY</c [...]
+<p>Drill now supports creating partitions automatically when using <code class="language-plaintext highlighter-rouge">CREATE TABLE AS</code> (CTAS) to generate Parquet files. By leveraging the unique capabilities of the Parquet file format, Drill is able provide this capability and still maintain data integrity and compatibility with all other systems that consume Parquet (a world’s first). This functionality can be exercised by utilizing the new <a href="https://drill.apache.org/docs/pa [...]
 
 <h2 id="window-functions">Window Functions</h2>
 
 <p>Drill now has first-class support for SQL window Functions. This includes:</p>
 
 <ul>
-<li>Aggregate Functions: <code>AVG</code>, <code>COUNT</code>, <code>MAX</code>, <code>MIN</code>, <code>SUM</code></li>
-<li>Ranking Functions: <code>CUME_DIST</code>, <code>DENSE_RANK</code>, <code>PERCENT_RANK</code>, <code>RANK</code> and <code>ROW_NUMBER</code></li>
+  <li>Aggregate Functions: <code class="language-plaintext highlighter-rouge">AVG</code>, <code class="language-plaintext highlighter-rouge">COUNT</code>, <code class="language-plaintext highlighter-rouge">MAX</code>, <code class="language-plaintext highlighter-rouge">MIN</code>, <code class="language-plaintext highlighter-rouge">SUM</code></li>
+  <li>Ranking Functions: <code class="language-plaintext highlighter-rouge">CUME_DIST</code>, <code class="language-plaintext highlighter-rouge">DENSE_RANK</code>, <code class="language-plaintext highlighter-rouge">PERCENT_RANK</code>, <code class="language-plaintext highlighter-rouge">RANK</code> and <code class="language-plaintext highlighter-rouge">ROW_NUMBER</code></li>
 </ul>
 
-<p>The community has done an excellent job providing a <a href="https://drill.apache.org/docs/sql-window-functions-introduction/">comprehensive documentation</a> so that you can start using window functions immediately.  </p>
+<p>The community has done an excellent job providing a <a href="https://drill.apache.org/docs/sql-window-functions-introduction/">comprehensive documentation</a> so that you can start using window functions immediately.</p>
 
 <h2 id="hive-storage-plugin-enhancements">Hive Storage Plugin Enhancements</h2>
 
-<p>We&#39;ve enhanced Drill to work even better with Hive. We started by upgrading Drill&#39;s support to work with Hive 1.0. In addition, Drill now provides a powerful new security feature called <a href="https://drill.apache.org/docs/configuring-user-impersonation-with-hive-authorization/">delegated Hive impersonation</a>. Compatibility and performance are also improved with better data type support (including support for binary, tinyint and smallint data types) and better concurrency.</p>
+<p>We’ve enhanced Drill to work even better with Hive. We started by upgrading Drill’s support to work with Hive 1.0. In addition, Drill now provides a powerful new security feature called <a href="https://drill.apache.org/docs/configuring-user-impersonation-with-hive-authorization/">delegated Hive impersonation</a>. Compatibility and performance are also improved with better data type support (including support for binary, tinyint and smallint data types) and better concurrency.</p>
 
 <h2 id="sql-union-improvements">SQL UNION Improvements</h2>
 
-<p>We&#39;ve enhanced support for SQL <code>UNION</code> functionality.  Drill now supports both <code>UNION</code> and <code>UNION ALL</code> capabilities.  We&#39;ve also improved the query optimizer to better optimize plans that include the <code>UNION</code> clause.</p>
+<p>We’ve enhanced support for SQL <code class="language-plaintext highlighter-rouge">UNION</code> functionality.  Drill now supports both <code class="language-plaintext highlighter-rouge">UNION</code> and <code class="language-plaintext highlighter-rouge">UNION ALL</code> capabilities.  We’ve also improved the query optimizer to better optimize plans that include the <code class="language-plaintext highlighter-rouge">UNION</code> clause.</p>
 
 <h2 id="new-features-for-complex-data">New Features For Complex Data</h2>
 
-<p>Drill&#39;s complex data capabilities continue to be the best in the market, now including support for <code>COUNT</code> aggregations on various types of complex objects. Also, Drill&#39;s <code>FLATTEN</code> function now supports very large complex objects.</p>
+<p>Drill’s complex data capabilities continue to be the best in the market, now including support for <code class="language-plaintext highlighter-rouge">COUNT</code> aggregations on various types of complex objects. Also, Drill’s <code class="language-plaintext highlighter-rouge">FLATTEN</code> function now supports very large complex objects.</p>
 
 <h2 id="improved-jdbc-driver-compatibility">Improved JDBC Driver Compatibility</h2>
 
-<p>Apache Drill&#39;s JDBC driver continues to improve. This includes a number of enhancements and fixes to better support JDBC tools including <a href="https://drill.apache.org/docs/using-microstrategy-analytics-with-apache-drill/">MicroStrategy</a>, <a href="https://drill.apache.org/docs/using-tibco-spotfire-desktop-with-drill/">TIBCO Spotfire</a>, <a href="https://drill.apache.org/docs/configuring-jreport-with-drill/">JReport</a> and <a href="https://drill.apache.org/docs/using-jdbc-w [...]
+<p>Apache Drill’s JDBC driver continues to improve. This includes a number of enhancements and fixes to better support JDBC tools including <a href="https://drill.apache.org/docs/using-microstrategy-analytics-with-apache-drill/">MicroStrategy</a>, <a href="https://drill.apache.org/docs/using-tibco-spotfire-desktop-with-drill/">TIBCO Spotfire</a>, <a href="https://drill.apache.org/docs/configuring-jreport-with-drill/">JReport</a> and <a href="https://drill.apache.org/docs/using-jdbc-with- [...]
 
-<p>The Apache Drill community now publishes the JDBC driver for easy inclusion in your application through the use of Maven coordinates. You can incorporate Drill&#39;s JDBC driver into your application by <a href="http://search.maven.org/remotecontent?filepath=org/apache/drill/exec/drill-jdbc-all/1.1.0/drill-jdbc-all-1.1.0.jar">downloading it directly</a> or referencing the following coordinates within your Maven application.  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">&lt;dependency&gt;
+<p>The Apache Drill community now publishes the JDBC driver for easy inclusion in your application through the use of Maven coordinates. You can incorporate Drill’s JDBC driver into your application by <a href="http://search.maven.org/remotecontent?filepath=org/apache/drill/exec/drill-jdbc-all/1.1.0/drill-jdbc-all-1.1.0.jar">downloading it directly</a> or referencing the following coordinates within your Maven application.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>&lt;dependency&gt;
   &lt;groupId&gt;org.apache.drill.exec&lt;/groupId&gt;
   &lt;artifactId&gt;drill-jdbc-all&lt;/artifactId&gt;
   &lt;version&gt;1.1.0&lt;/version&gt;
 &lt;/dependency&gt;
-</code></pre></div>
-<h2 id="mongodb-3-0-support">MongoDB 3.0 Support</h2>
+</code></pre></div></div>
 
-<p>Drill now uses MongoDB&#39;s latest Java driver and has enhanced connection pooling for better performance and resilience in large-scale deployments.  Learn more about using the <a href="https://drill.apache.org/docs/mongodb-plugin-for-apache-drill/">MongoDB plugin</a>.</p>
+<h2 id="mongodb-30-support">MongoDB 3.0 Support</h2>
+<p>Drill now uses MongoDB’s latest Java driver and has enhanced connection pooling for better performance and resilience in large-scale deployments.  Learn more about using the <a href="https://drill.apache.org/docs/mongodb-plugin-for-apache-drill/">MongoDB plugin</a>.</p>
 
 <h2 id="many-more-fixes">Many More Fixes</h2>
-
-<p>Drill includes a variety of <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12329689">other fixes and enhancements</a> including:</p>
+<p>Drill includes a variety of <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12329689">other fixes and enhancements</a> including:</p>
 
 <ul>
-<li>Improvements for certain types of exists and correlated subqueries</li>
-<li>Fixes for running Drill on Windows</li>
-<li>Improvements in implicit casting capabilities in schemaless scenarios</li>
-<li>Improvements in <code>INFORMATION_SCHEMA</code></li>
-<li>Fixes for the Web UI and the REST API</li>
+  <li>Improvements for certain types of exists and correlated subqueries</li>
+  <li>Fixes for running Drill on Windows</li>
+  <li>Improvements in implicit casting capabilities in schemaless scenarios</li>
+  <li>Improvements in <code class="language-plaintext highlighter-rouge">INFORMATION_SCHEMA</code></li>
+  <li>Fixes for the Web UI and the REST API</li>
 </ul>
 
 <h2 id="more-frequent-releases">More Frequent Releases</h2>
-
-<p>The amazing Drill community continues to be the driving force behind these rapid iterative releases.  Thanks to all the <a href="/team/">committers</a> and contributors that made this release possible!  The community&#39;s goal continues to be maintaining a 4-6 release cycle throughout the summer.  This means you will have many more enhancements to look for in the next few months.  If you use Drill, want to, or want to help develop it, drop by in <a href="https://drill.apache.org/mail [...]
+<p>The amazing Drill community continues to be the driving force behind these rapid iterative releases.  Thanks to all the <a href="/team/">committers</a> and contributors that made this release possible!  The community’s goal continues to be maintaining a 4-6 release cycle throughout the summer.  This means you will have many more enhancements to look for in the next few months.  If you use Drill, want to, or want to help develop it, drop by in <a href="https://drill.apache.org/mailingl [...]
 
 <p>Download the <a href="https://drill.apache.org/download/">Drill 1.1.0 release</a> now and let us know your thoughts.</p>
 
-<p>Drill On!<br>
+<p>Drill On!<br />
 Jacques Nadeau</p>
 
   </article>
@@ -224,7 +223,7 @@ Jacques Nadeau</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/07/23/drill-tutorial-at-nosql-now-2015/index.html b/blog/2015/07/23/drill-tutorial-at-nosql-now-2015/index.html
index 12e3399..c37b18f 100644
--- a/blog/2015/07/23/drill-tutorial-at-nosql-now-2015/index.html
+++ b/blog/2015/07/23/drill-tutorial-at-nosql-now-2015/index.html
@@ -153,9 +153,9 @@
     <span class="_date_format">MM-DD-YYYY</span>
 </a></p>
 
-<p>NoSQL Now! 2015 will be hosting a <a href="http://nosql2015.dataversity.net/sessionPop.cfm?confid=90&proposalid=7727">3-hour tutorial</a> on Apache Drill. Jacques Nadeau and I will provide a deep dive on Drill and demonstrate how to analyze NoSQL data with SQL queries and standard BI tools. We would love to see you there!</p>
+<p>NoSQL Now! 2015 will be hosting a <a href="http://nosql2015.dataversity.net/sessionPop.cfm?confid=90&amp;proposalid=7727">3-hour tutorial</a> on Apache Drill. Jacques Nadeau and I will provide a deep dive on Drill and demonstrate how to analyze NoSQL data with SQL queries and standard BI tools. We would love to see you there!</p>
 
-<p>When you <a href="http://nosql2015.dataversity.net/reg.cfm">register</a>, use the coupon code &quot;SPEAKER&quot; for a 20% discount on the registration fees.</p>
+<p>When you <a href="http://nosql2015.dataversity.net/reg.cfm">register</a>, use the coupon code “SPEAKER” for a 20% discount on the registration fees.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -179,7 +179,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/10/16/drill-1.2-released/index.html b/blog/2015/10/16/drill-1.2-released/index.html
index 3f56695..934ab0f 100644
--- a/blog/2015/10/16/drill-1.2-released/index.html
+++ b/blog/2015/10/16/drill-1.2-released/index.html
@@ -137,7 +137,7 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today I&#39;m happy to announce the availability of the Drill 1.2 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12332042&projectId=12313820">217 JIRAs</a> on top of the 1.1 release. Highlights include:</p>
+    <p>Today I’m happy to announce the availability of the Drill 1.2 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12332042&amp;projectId=12313820">217 JIRAs</a> on top of the 1.1 release. Highlights include:</p>
 
 <h2 id="relational-database-support">Relational Database Support</h2>
 
@@ -145,35 +145,34 @@
 
 <h2 id="new-window-functions">New Window Functions</h2>
 
-<p>The 1.2 release adds additional window functions: <code>NTILE</code>, <code>FIRST_VALUE</code>, <code>LAST_VALUE</code>, <code>LEAD</code> and <code>LAG</code>. Drill now supports <a href="/docs/sql-window-functions-introduction/">15 different window functions</a>:</p>
+<p>The 1.2 release adds additional window functions: <code class="language-plaintext highlighter-rouge">NTILE</code>, <code class="language-plaintext highlighter-rouge">FIRST_VALUE</code>, <code class="language-plaintext highlighter-rouge">LAST_VALUE</code>, <code class="language-plaintext highlighter-rouge">LEAD</code> and <code class="language-plaintext highlighter-rouge">LAG</code>. Drill now supports <a href="/docs/sql-window-functions-introduction/">15 different window functions</a>:</p>
 
 <ul>
-<li>Value Functions: <code>FIRST_VALUE</code>, <code>LAST_VALUE</code>, <code>LEAD</code>, <code>LAG</code></li>
-<li>Aggregate Functions: <code>AVG</code>, <code>COUNT</code>, <code>MAX</code>, <code>MIN</code>, <code>SUM</code></li>
-<li>Ranking Functions: <code>CUME_DIST</code>, <code>DENSE_RANK</code>, <code>NTILE</code>, <code>PERCENT_RANK</code>, <code>RANK</code>, <code>ROW_NUMBER</code></li>
+  <li>Value Functions: <code class="language-plaintext highlighter-rouge">FIRST_VALUE</code>, <code class="language-plaintext highlighter-rouge">LAST_VALUE</code>, <code class="language-plaintext highlighter-rouge">LEAD</code>, <code class="language-plaintext highlighter-rouge">LAG</code></li>
+  <li>Aggregate Functions: <code class="language-plaintext highlighter-rouge">AVG</code>, <code class="language-plaintext highlighter-rouge">COUNT</code>, <code class="language-plaintext highlighter-rouge">MAX</code>, <code class="language-plaintext highlighter-rouge">MIN</code>, <code class="language-plaintext highlighter-rouge">SUM</code></li>
+  <li>Ranking Functions: <code class="language-plaintext highlighter-rouge">CUME_DIST</code>, <code class="language-plaintext highlighter-rouge">DENSE_RANK</code>, <code class="language-plaintext highlighter-rouge">NTILE</code>, <code class="language-plaintext highlighter-rouge">PERCENT_RANK</code>, <code class="language-plaintext highlighter-rouge">RANK</code>, <code class="language-plaintext highlighter-rouge">ROW_NUMBER</code></li>
 </ul>
 
-<p>In addition to supporting new window functions, Drill 1.2 adds support for multiple window functions in a single query. A query can contain multiple window functions that slice up the data in different ways by means of different <code>OVER</code> clauses, but they all act on the same collection of rows.</p>
+<p>In addition to supporting new window functions, Drill 1.2 adds support for multiple window functions in a single query. A query can contain multiple window functions that slice up the data in different ways by means of different <code class="language-plaintext highlighter-rouge">OVER</code> clauses, but they all act on the same collection of rows.</p>
 
 <h2 id="parquet-metadata-caching">Parquet Metadata Caching</h2>
 
-<p>When running a query against a directory tree with Parquet files, Drill scans the directory and reads the footers of the files during the planning phase. This allows Drill to prune partitions and optimize query execution for data locality. However, this process can be time consuming for directory trees with thousands of files. Drill 1.2 includes <a href="/docs/optimizing-parquet-reading/">a new feature</a> that caches the metadata information so that subsequent queries don&#39;t need  [...]
+<p>When running a query against a directory tree with Parquet files, Drill scans the directory and reads the footers of the files during the planning phase. This allows Drill to prune partitions and optimize query execution for data locality. However, this process can be time consuming for directory trees with thousands of files. Drill 1.2 includes <a href="/docs/optimizing-parquet-reading/">a new feature</a> that caches the metadata information so that subsequent queries don’t need to s [...]
 
 <h2 id="performance-improvements-on-hbase-and-hive-tables">Performance Improvements on HBase and Hive Tables</h2>
 
 <p>Drill 1.2 introduces a faster read path for HBase and Hive tables. When querying Hive tables backed by Parquet files, Drill now uses a high-performance Parquet reader rather than the Hive SerDe.</p>
 
-<h2 id="drop-table-for-files-and-directories"><code>DROP TABLE</code> for Files and Directories</h2>
+<h2 id="drop-table-for-files-and-directories"><code class="language-plaintext highlighter-rouge">DROP TABLE</code> for Files and Directories</h2>
 
-<p>Drill 1.2 allows users to drop file- and directory-based tables with a SQL command (<code>DROP TABLE</code>).</p>
+<p>Drill 1.2 allows users to drop file- and directory-based tables with a SQL command (<code class="language-plaintext highlighter-rouge">DROP TABLE</code>).</p>
 
 <h2 id="enhanced-mongodb-integration">Enhanced MongoDB Integration</h2>
 
-<p>Drill 1.2 supports extended JSON types, addressing previous issues with queries on MongoDB collections. </p>
+<p>Drill 1.2 supports extended JSON types, addressing previous issues with queries on MongoDB collections.</p>
 
 <h2 id="many-more-fixes">Many More Fixes</h2>
-
-<p>Drill 1.2 includes hundreds of <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12332042&projectId=12313820">other fixes and enhancements</a>.</p>
+<p>Drill 1.2 includes hundreds of <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12332042&amp;projectId=12313820">other fixes and enhancements</a>.</p>
 
 <p>Download the <a href="https://drill.apache.org/download/">Drill 1.2 release</a> now and let us know your thoughts.</p>
 
@@ -202,7 +201,7 @@ Jacques Nadeau</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/11/23/drill-1.3-released/index.html b/blog/2015/11/23/drill-1.3-released/index.html
index a49a325..9737f38 100644
--- a/blog/2015/11/23/drill-1.3-released/index.html
+++ b/blog/2015/11/23/drill-1.3-released/index.html
@@ -137,13 +137,13 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today I&#39;m happy to announce the availability of the Drill 1.3 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12332946">58 JIRAs</a> on top of the 1.2 release. Highlights include:</p>
+    <p>Today I’m happy to announce the availability of the Drill 1.3 release. This release addresses <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12332946">58 JIRAs</a> on top of the 1.2 release. Highlights include:</p>
 
 <h2 id="enhanced-amazon-s3-support">Enhanced Amazon S3 Support</h2>
 
 <p>Drill 1.3 utilizes a new library, called s3a, for reading data from S3. The s3a library includes improvements over the previous s3n library, such as higher performance and the ability to read large files (over 5GB).</p>
 
-<p>In addition to the new s3a library, Drill 1.3 makes it easier to set up your AWS credentials. Simply edit the file <code>conf/core-site.xml</code> in the Drill install directory. For more information, check out the <a href="/docs/s3-storage-plugin/">step-by-step instructions</a> in the documentation.</p>
+<p>In addition to the new s3a library, Drill 1.3 makes it easier to set up your AWS credentials. Simply edit the file <code class="language-plaintext highlighter-rouge">conf/core-site.xml</code> in the Drill install directory. For more information, check out the <a href="/docs/s3-storage-plugin/">step-by-step instructions</a> in the documentation.</p>
 
 <h2 id="heterogeneous-types">Heterogeneous Types</h2>
 
@@ -151,22 +151,27 @@
 
 <p>Drill 1.3 provides a collection of functions that enable you to test the data type of a value. For example, if you have a column that has both lists (arrays) and numbers, you can use the following query to extract the first element from the array values:</p>
 
-<p><code>SELECT 1 + CASE WHEN is_list(a) THEN a[0] ELSE a END FROM table;</code></p>
+<p><code class="language-plaintext highlighter-rouge">SELECT 1 + CASE WHEN is_list(a) THEN a[0] ELSE a END FROM table;</code></p>
 
 <h2 id="text-file-headers">Text File Headers</h2>
 
-<p>Drill is now able to parse the header row in a text file (CSV, TSV, etc.). Prior to Drill 1.3, data had to be accessed through the <code>columns</code> array:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT columns[0], columns[1] FROM dfs.`/path/to/users.csv`
-</code></pre></div>
+<p>Drill is now able to parse the header row in a text file (CSV, TSV, etc.). Prior to Drill 1.3, data had to be accessed through the <code class="language-plaintext highlighter-rouge">columns</code> array:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT columns[0], columns[1] FROM dfs.`/path/to/users.csv`
+</code></pre></div></div>
+
 <p>With Drill 1.3, you can use the actual column names in the CSV file:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT name, address FROM dfs.`/path/to/users.csv`
-</code></pre></div>
-<p>Enabling header parsing is as simple as setting the <code>extractHeader</code> parameter in the storage plugin configuration for the desired file extensions. For more information, check out <a href="/docs/text-files-csv-tsv-psv/">the documentation</a>.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT name, address FROM dfs.`/path/to/users.csv`
+</code></pre></div></div>
+
+<p>Enabling header parsing is as simple as setting the <code class="language-plaintext highlighter-rouge">extractHeader</code> parameter in the storage plugin configuration for the desired file extensions. For more information, check out <a href="/docs/text-files-csv-tsv-psv/">the documentation</a>.</p>
 
 <h2 id="sequence-files">Sequence Files</h2>
 
 <p>Drill now <a href="/docs/querying-sequence-files/">supports sequence files</a>, a format commonly used in the Hadoop ecosystem. A sequence file contains a series of keys and values, and querying it with Drill is as easy as querying any other self-describing format:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT *
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT *
 FROM dfs.tmp.`simple.seq`
 LIMIT 1;
 +--------------+---------------+
@@ -174,9 +179,11 @@ LIMIT 1;
 +--------------+---------------+
 | [B@70828f46  | [B@b8c765f    |
 +--------------+---------------+
-</code></pre></div>
-<p>Drill&#39;s <code>CONVERT_FROM</code> function makes it easy to decode the binary values:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT CONVERT_FROM(binary_key, &#39;UTF8&#39;), CONVERT_FROM(binary_value, &#39;UTF8&#39;)
+</code></pre></div></div>
+
+<p>Drill’s <code class="language-plaintext highlighter-rouge">CONVERT_FROM</code> function makes it easy to decode the binary values:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT CONVERT_FROM(binary_key, 'UTF8'), CONVERT_FROM(binary_value, 'UTF8')
 FROM dfs.tmp.`simple.seq`
 LIMIT 1
 ;
@@ -185,10 +192,11 @@ LIMIT 1
 +-----------+-------------+
 | key0      |   value0    |
 +-----------+-------------+
-</code></pre></div>
+</code></pre></div></div>
+
 <h2 id="many-more-fixes">Many More Fixes</h2>
 
-<p>Drill 1.3 includes many other improvements, including enhancements related to querying Hive tables, MongoDB collections and Avro files. Check out the complete list of <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12332946">fixes and enhancements</a> for more information.</p>
+<p>Drill 1.3 includes many other improvements, including enhancements related to querying Hive tables, MongoDB collections and Avro files. Check out the complete list of <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12332946">fixes and enhancements</a> for more information.</p>
 
 <p>Download the <a href="https://drill.apache.org/download/">Drill 1.3 release</a> now and let us know your thoughts.</p>
 
@@ -217,7 +225,7 @@ Jacques Nadeau</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2015/12/14/drill-1.4-released/index.html b/blog/2015/12/14/drill-1.4-released/index.html
index 920fde4..b65a34b 100644
--- a/blog/2015/12/14/drill-1.4-released/index.html
+++ b/blog/2015/12/14/drill-1.4-released/index.html
@@ -137,41 +137,52 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Apache Drill 1.4 (<a href="https://drill.apache.org/download/">available here</a>) includes bug fixes and enhancements from <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12332947&projectId=12313820">32 
+    <p>Apache Drill 1.4 (<a href="https://drill.apache.org/download/">available here</a>) includes bug fixes and enhancements from <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12332947&amp;projectId=12313820">32 
 JIRAs</a>.</p>
 
-<p>Here&#39;s a list of highlights from this newest version of Drill:</p>
+<p>Here’s a list of highlights from this newest version of Drill:</p>
 
 <h2 id="select-with-options">Select With Options</h2>
+<p>Queries that change storage plugin configuration options can now be written. For instance, to query the file <code class="language-plaintext highlighter-rouge">CO.dat</code>, the following can be used:</p>
 
-<p>Queries that change storage plugin configuration options can now be written. For instance, to query the file <code>CO.dat</code>, the following can be used:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT * FROM TABLE(dfs.`/path/to/CO.dat`(type =&gt; &#39;text&#39;));
-</code></pre></div>
-<p>If a version of <code>CO.dat</code> with a header is available, the first entries of the file can be parsed as column names by 
-passing an <code>extractHeader =&gt; true</code> argument. We can also use a pipe symbol, &#39;|&#39;, as the delimiter by passing 
-<code>fieldDelimiter</code>:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT * FROM TABLE(dfs.`/path/to/CO.dat`(type =&gt; &#39;text&#39;, fieldDelimiter =&gt; &#39;|&#39;, extractHeader =&gt; true));
-</code></pre></div>
-<p>Additionally, <code>lineDelimiter</code> can be used to indicate a deliminter for new lines, such as the double pipe, &#39;||&#39;, symbol in this example:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT * FROM TABLE(dfs.`/path/to/CO.dat`(type =&gt; &#39;text&#39;, lineDelimiter =&gt; &#39;||&#39;, fieldDelimiter =&gt; &#39;|&#39;));
-</code></pre></div>
-<h2 id="improved-behavior-for-csv-header-parsing">Improved Behavior For CSV Header Parsing</h2>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT * FROM TABLE(dfs.`/path/to/CO.dat`(type =&gt; 'text'));
+</code></pre></div></div>
+
+<p>If a version of <code class="language-plaintext highlighter-rouge">CO.dat</code> with a header is available, the first entries of the file can be parsed as column names by 
+passing an <code class="language-plaintext highlighter-rouge">extractHeader =&gt; true</code> argument. We can also use a pipe symbol, ‘|’, as the delimiter by passing 
+<code class="language-plaintext highlighter-rouge">fieldDelimiter</code>:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT * FROM TABLE(dfs.`/path/to/CO.dat`(type =&gt; 'text', fieldDelimiter =&gt; '|', extractHeader =&gt; true));
+</code></pre></div></div>
+
+<table>
+  <tbody>
+    <tr>
+      <td>Additionally, <code class="language-plaintext highlighter-rouge">lineDelimiter</code> can be used to indicate a delimiter for new lines, such as the double pipe, ‘</td>
+      <td> </td>
+      <td>’, symbol in this example:</td>
+    </tr>
+  </tbody>
+</table>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT * FROM TABLE(dfs.`/path/to/CO.dat`(type =&gt; 'text', lineDelimiter =&gt; '||', fieldDelimiter =&gt; '|'));
+</code></pre></div></div>
 
+<h2 id="improved-behavior-for-csv-header-parsing">Improved Behavior For CSV Header Parsing</h2>
 <p>When header parsing is enabled, queries to CSV files no longer raise an exception if the indicated column does not 
-exist. Instead, Drill now returns <code>null</code> values for that column.</p>
+exist. Instead, Drill now returns <code class="language-plaintext highlighter-rouge">null</code> values for that column.</p>
 
 <h2 id="json-formatting">JSON Formatting</h2>
+<p>For more compact results, Drill’s default behavior of pretty-printing JSON can now be changed by setting the variable 
+<code class="language-plaintext highlighter-rouge">store.json.writer.uglify</code> to <code class="language-plaintext highlighter-rouge">true</code>. As in:</p>
 
-<p>For more compact results, Drill&#39;s default behavior of pretty-printing JSON can now be changed by setting the variable 
-<code>store.json.writer.uglify</code> to <code>true</code>. As in:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ALTER SESSION SET store.json.writer.uglify = true;
-</code></pre></div>
-<h2 id="better-logging">Better Logging</h2>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ALTER SESSION SET store.json.writer.uglify = true;
+</code></pre></div></div>
 
-<p>SQL query text is now logged to the <code>drillbit.log</code> file.</p>
+<h2 id="better-logging">Better Logging</h2>
+<p>SQL query text is now logged to the <code class="language-plaintext highlighter-rouge">drillbit.log</code> file.</p>
 
 <h2 id="other-improvements">Other Improvements</h2>
-
 <p>This version also features schema change compatible sorting, better Apache Hive support, and more efficient caching for Parquet file metadata.</p>
 
   </article>
@@ -196,7 +207,7 @@ exist. Instead, Drill now returns <code>null</code> values for that column.</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2016/02/16/drill-1.5-released/index.html b/blog/2016/02/16/drill-1.5-released/index.html
index f756234..2826e57 100644
--- a/blog/2016/02/16/drill-1.5-released/index.html
+++ b/blog/2016/02/16/drill-1.5-released/index.html
@@ -144,27 +144,23 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.5.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.5.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="web-authentication">Web Authentication</h2>
-
-<p>Drill 1.5 extends Drill user authentication to the Web Console and underlying REST API so administrators can control the extent of access to the Web Console and REST API client applications. Configuration docs are available <a href="/docs/configuring-web-console-and-rest-api-security/">here</a>.  </p>
+<p>Drill 1.5 extends Drill user authentication to the Web Console and underlying REST API so administrators can control the extent of access to the Web Console and REST API client applications. Configuration docs are available <a href="/docs/configuring-web-console-and-rest-api-security/">here</a>.</p>
 
 <h2 id="kudu-support">Kudu Support</h2>
-
-<p>Drill now includes experimental support for querying the Apache Kudu (incubating) scalable columnar database.  </p>
+<p>Drill now includes experimental support for querying the Apache Kudu (incubating) scalable columnar database.</p>
 
 <h2 id="improved-memory-allocator">Improved Memory Allocator</h2>
-
-<p>Drill uses a new allocator that improves an operator’s use of direct memory and tracks the memory use more accurately. See the <a href="/docs/configuring-drill-memory/">Configuring Drill Memory</a> doc page for more info.  </p>
+<p>Drill uses a new allocator that improves an operator’s use of direct memory and tracks the memory use more accurately. See the <a href="/docs/configuring-drill-memory/">Configuring Drill Memory</a> doc page for more info.</p>
 
 <h2 id="configurable-caching-of-hive-metadata">Configurable Caching of Hive Metadata</h2>
-
 <p>You can now configure the TTL for the Hive metadata client cache depending on how frequently the Hive metadata is updated. See the <a href="/docs/hive-metadata-caching/">Hive Metadata Caching</a> doc page for more info.</p>
 
-<p>A complete list of JIRAs resolved in the 1.5.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12332948">here</a>.</p>
+<p>A complete list of JIRAs resolved in the 1.5.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12332948">here</a>.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -188,7 +184,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2016/03/16/drill-1.6-released/index.html b/blog/2016/03/16/drill-1.6-released/index.html
index 742ad88..4a3f2bb 100644
--- a/blog/2016/03/16/drill-1.6-released/index.html
+++ b/blog/2016/03/16/drill-1.6-released/index.html
@@ -137,19 +137,18 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.6.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.6.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="inbound-impersonation">Inbound Impersonation</h2>
-
 <p>This feature is useful in a multi-tier architecture where queries must run as the end user instead of the application user. See <a href="/docs/configuring-inbound-impersonation/">Configuring Inbound Impersonation</a>.</p>
 
 <h2 id="additional-custom-window-frames">Additional Custom Window Frames</h2>
+<p>The window function frame clause now supports additional custom frames. See <a href="/docs/sql-window-functions-introduction/#syntax">Window Function Syntax</a>.</p>
 
-<p>The window function frame clause now supports additional custom frames. See <a href="/docs/sql-window-functions-introduction/#syntax">Window Function Syntax</a>. </p>
+<p>A complete list of JIRAs resolved in the 1.6.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334766&amp;styleName=Html&amp;projectId=12313820&amp;Create=Create&amp;atl_token=A5KQ-2QAV-T4JA-FDED%7C9ec2112379f0ae5d2b67a8cbd2626bcde62b41cd%7Clout">here</a>.</p>
 
-<p>A complete list of JIRAs resolved in the 1.6.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334766&styleName=Html&projectId=12313820&Create=Create&atl_token=A5KQ-2QAV-T4JA-FDED%7C9ec2112379f0ae5d2b67a8cbd2626bcde62b41cd%7Clout">here</a>.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -173,7 +172,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2016/06/28/drill-1.7-released/index.html b/blog/2016/06/28/drill-1.7-released/index.html
index 12809bc..b93603d 100644
--- a/blog/2016/06/28/drill-1.7-released/index.html
+++ b/blog/2016/06/28/drill-1.7-released/index.html
@@ -137,23 +137,21 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.7.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.7.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="monitoring-via-jmx">Monitoring via JMX</h2>
-
-<p>You can access Drill system-level metrics, collected by JMX, through the Metrics page in the Drill Web Console or a remote JMX monitoring tool. See <a href="https://drill.apache.org/docs/monitoring-metrics/">Monitoring Metrics</a>. </p>
+<p>You can access Drill system-level metrics, collected by JMX, through the Metrics page in the Drill Web Console or a remote JMX monitoring tool. See <a href="https://drill.apache.org/docs/monitoring-metrics/">Monitoring Metrics</a>.</p>
 
 <h2 id="hive-char-data-type">Hive CHAR Data Type</h2>
-
-<p>Drill automatically converts the Hive CHAR data type to VARCHAR. You no longer need to cast the Hive CHAR data type to VARCHAR when you query Hive tables. See <a href="https://drill.apache.org/docs/hive-to-drill-data-type-mapping/">Hive-to-Drill Data Type Mapping</a>.  </p>
+<p>Drill automatically converts the Hive CHAR data type to VARCHAR. You no longer need to cast the Hive CHAR data type to VARCHAR when you query Hive tables. See <a href="https://drill.apache.org/docs/hive-to-drill-data-type-mapping/">Hive-to-Drill Data Type Mapping</a>.</p>
 
 <h2 id="hbase">HBase</h2>
+<p>Drill now supports HBase 1.x.</p>
 
-<p>Drill now supports HBase 1.x. </p>
+<p>A complete list of JIRAs resolved in the 1.7.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334767&amp;styleName=&amp;projectId=12313820">here</a>.</p>
 
-<p>A complete list of JIRAs resolved in the 1.7.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334767&styleName=&projectId=12313820">here</a>.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -177,7 +175,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2016/08/30/drill-1.8-released/index.html b/blog/2016/08/30/drill-1.8-released/index.html
index dec4059..d0f9c0e 100644
--- a/blog/2016/08/30/drill-1.8-released/index.html
+++ b/blog/2016/08/30/drill-1.8-released/index.html
@@ -137,31 +137,27 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.8.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.8.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="metadata-cache-pruning">Metadata Cache Pruning</h2>
-
-<p>Drill now applies partition pruning to the metadata cache file. See <a href="https://drill.apache.org/docs/partition-pruning-introduction/">Partition Pruning Introduction</a> and <a href="https://drill.apache.org/docs/optimizing-parquet-metadata-reading/">Optimizing Parquet Metadata Reading</a>. </p>
+<p>Drill now applies partition pruning to the metadata cache file. See <a href="https://drill.apache.org/docs/partition-pruning-introduction/">Partition Pruning Introduction</a> and <a href="https://drill.apache.org/docs/optimizing-parquet-metadata-reading/">Optimizing Parquet Metadata Reading</a>.</p>
 
 <h2 id="if-exists-support">IF EXISTS Support</h2>
-
 <p>You can include the new IF EXISTS parameter with the DROP TABLE and DROP VIEW commands to prevent Drill from returning error messages when a table or view does not exist. See <a href="https://drill.apache.org/docs/drop-table/">DROP TABLE</a> and <a href="https://drill.apache.org/docs/drop-view/">DROP VIEW</a>.</p>
 
 <h2 id="describe-schema-command">DESCRIBE SCHEMA Command</h2>
-
-<p>Drill now supports the DESCRIBE SCHEMA command which provides schema properties for storage plugin configurations and workspaces. See <a href="https://drill.apache.org/docs/describe/">DESCRIBE</a>.  </p>
+<p>Drill now supports the DESCRIBE SCHEMA command which provides schema properties for storage plugin configurations and workspaces. See <a href="https://drill.apache.org/docs/describe/">DESCRIBE</a>.</p>
 
 <h2 id="multi-byte-delimiter-support">Multi-Byte Delimiter Support</h2>
-
-<p>Drill now supports multi-byte delimiters for text files, such as \r\n. See <a href="https://drill.apache.org/docs/plugin-configuration-basics/#list-of-attributes-and-definitions">List of Attributes and Definitions</a>.  </p>
+<p>Drill now supports multi-byte delimiters for text files, such as \r\n. See <a href="https://drill.apache.org/docs/plugin-configuration-basics/#list-of-attributes-and-definitions">List of Attributes and Definitions</a>.</p>
 
 <h2 id="filter-selectivity-estimate-parameters">Filter Selectivity Estimate Parameters</h2>
+<p>New parameters set the minimum filter selectivity estimate to increase the parallelization of the major fragment performing a join. See <a href="https://drill.apache.org/docs/configuration-options-introduction/#system-options">System Options</a>.</p>
 
-<p>New parameters set the minimum filter selectivity estimate to increase the parallelization of the major fragment performing a join. See <a href="https://drill.apache.org/docs/configuration-options-introduction/#system-options">System Options</a>. </p>
+<p>A complete list of JIRAs resolved in the 1.8.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334768&amp;styleName=Html&amp;projectId=12313820&amp;Create=Create&amp;atl_token=A5KQ-2QAV-T4JA-FDED%7Ce8d020149d9a6082481af301e563adbe35c76a87%7Clout">here</a>.</p>
 
-<p>A complete list of JIRAs resolved in the 1.8.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334768&styleName=Html&projectId=12313820&Create=Create&atl_token=A5KQ-2QAV-T4JA-FDED%7Ce8d020149d9a6082481af301e563adbe35c76a87%7Clout">here</a>.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -185,7 +181,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2016/11/29/drill-1.9-released/index.html b/blog/2016/11/29/drill-1.9-released/index.html
index eb72117..f579881 100644
--- a/blog/2016/11/29/drill-1.9-released/index.html
+++ b/blog/2016/11/29/drill-1.9-released/index.html
@@ -137,27 +137,24 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.9.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.9.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="asynchronous-parquet-reader">Asynchronous Parquet Reader</h2>
-
-<p>The new asynchronous Parquet reader feature improves the performance of the Parquet Scan operator by increasing the speed at which the Parquet reader scans, decompresses, and decodes data. See <a href="/docs/asynchronous-parquet-reader/">Asynchronous Parquet Reader</a>. </p>
+<p>The new asynchronous Parquet reader feature improves the performance of the Parquet Scan operator by increasing the speed at which the Parquet reader scans, decompresses, and decodes data. See <a href="/docs/asynchronous-parquet-reader/">Asynchronous Parquet Reader</a>.</p>
 
 <h2 id="parquet-filter-pushdown">Parquet Filter Pushdown</h2>
-
 <p>The new Parquet filter pushdown feature optimizes Drill’s performance by pruning extraneous data from a Parquet file to reduce the amount of data that Drill scans and reads when a query on a Parquet file contains a filter expression. See <a href="/docs/parquet-filter-pushdown/">Parquet Filter Pushdown</a>.</p>
 
 <h2 id="dynamic-udf-support">Dynamic UDF Support</h2>
-
-<p>The new Dynamic UDF feature enables users to register and unregister UDFs on their own using the new CREATE FUNCTION USING JAR and DROP FUNCTION USING JAR commands. See <a href="/docs/dynamic-udfs/">Dynamic UDFs</a>.  </p>
+<p>The new Dynamic UDF feature enables users to register and unregister UDFs on their own using the new CREATE FUNCTION USING JAR and DROP FUNCTION USING JAR commands. See <a href="/docs/dynamic-udfs/">Dynamic UDFs</a>.</p>
 
 <h2 id="httpd-format-plugin">HTTPD Format Plugin</h2>
+<p>The new HTTPD format plugin adds the capability to query HTTP web server logs natively and also includes parse_url() and parse_query() UDFs. The parse_url() UDF returns maps of the URL. The parse_query() UDF returns the query string.</p>
 
-<p>The new HTTPD format plugin adds the capability to query HTTP web server logs natively and also includes parse_url() and parse_query() UDFs. The parse_url() UDF returns maps of the URL. The parse_query() UDF returns the query string.  </p>
+<p>A complete list of JIRAs resolved in the 1.9.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12337861&amp;styleName=Html&amp;projectId=12313820&amp;Create=Create&amp;atl_token=A5KQ-2QAV-T4JA-FDED%7Cedcc6294c1851bcd19a3686871e085181f755a91%7Clin">here</a>.</p>
 
-<p>A complete list of JIRAs resolved in the 1.9.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12337861&styleName=Html&projectId=12313820&Create=Create&atl_token=A5KQ-2QAV-T4JA-FDED%7Cedcc6294c1851bcd19a3686871e085181f755a91%7Clin">here</a>.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -181,7 +178,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2017/03/15/drill-1.10-released/index.html b/blog/2017/03/15/drill-1.10-released/index.html
index adb3bc2..92a4ad7 100644
--- a/blog/2017/03/15/drill-1.10-released/index.html
+++ b/blog/2017/03/15/drill-1.10-released/index.html
@@ -137,31 +137,27 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.10.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.10.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="cttas">CTTAS</h2>
-
-<p>The CREATE TEMPORARY TABLE AS (CTTAS) command stores the results of a query in a temporary table. See <a href="/docs/create-temporary-table-as-cttas/">CTTAS</a>. </p>
+<p>The CREATE TEMPORARY TABLE AS (CTTAS) command stores the results of a query in a temporary table. See <a href="/docs/create-temporary-table-as-cttas/">CTTAS</a>.</p>
 
 <h2 id="improved-fault-tolerance">Improved Fault Tolerance</h2>
-
-<p>An optional <code>tries=&lt;value&gt;</code> parameter included in the JDBC connection string indicates the number of unique drillbits to which the client can try to connect. See <a href="/docs/using-the-jdbc-driver/#using-the-jdbc-url-format-for-a-direct-drillbit-connection">Using the JDBC URL Format for a Direct Drillbit Connection</a>.    </p>
+<p>An optional <code class="language-plaintext highlighter-rouge">tries=&lt;value&gt;</code> parameter included in the JDBC connection string indicates the number of unique drillbits to which the client can try to connect. See <a href="/docs/using-the-jdbc-driver/#using-the-jdbc-url-format-for-a-direct-drillbit-connection">Using the JDBC URL Format for a Direct Drillbit Connection</a>.</p>
 
 <h2 id="drill-version-and-statistics-in-web-console">Drill Version and Statistics in Web Console</h2>
-
-<p>The Web Console displays additional query profile statistics and the Drill version running on each Drill node in the cluster. See <a href="/docs/identifying-multiple-drill-versions-in-a-cluster">Identifying Multiple Drill Versions in a Cluster</a>.  </p>
+<p>The Web Console displays additional query profile statistics and the Drill version running on each Drill node in the cluster. See <a href="/docs/identifying-multiple-drill-versions-in-a-cluster">Identifying Multiple Drill Versions in a Cluster</a>.</p>
 
 <h2 id="implicit-interpretation-of-int96">Implicit Interpretation of INT96</h2>
-
-<p>Drill implicitly interprets the INT96 timestamp data type in Parquet files when the new <code>store.parquet.reader.int96_as_timestamp</code> option is enabled. See <a href="/docs/parquet-format/#about-int96-support">About INT96 Support</a>.</p>
+<p>Drill implicitly interprets the INT96 timestamp data type in Parquet files when the new <code class="language-plaintext highlighter-rouge">store.parquet.reader.int96_as_timestamp</code> option is enabled. See <a href="/docs/parquet-format/#about-int96-support">About INT96 Support</a>.</p>
 
 <h2 id="kerberos-authentication">Kerberos Authentication</h2>
-
 <p>Drill supports Kerberos authentication between the client and drillbit. See <a href="/docs/configuring-kerberos-authentication/">Configuring Kerberos Authentication</a> in the <a href="/docs/securing-drill/">Securing Drill</a> section.</p>
 
-<p>A complete list of JIRAs resolved in the 1.10.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12338769&styleName=Html&projectId=12313820&Create=Create&atl_token=A5KQ-2QAV-T4JA-FDED%7C264858c85b35c3b8ac66b0573aa7e88ffa802c9d%7Clin">here</a>.</p>
+<p>A complete list of JIRAs resolved in the 1.10.0 release can be found <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12338769&amp;styleName=Html&amp;projectId=12313820&amp;Create=Create&amp;atl_token=A5KQ-2QAV-T4JA-FDED%7C264858c85b35c3b8ac66b0573aa7e88ffa802c9d%7Clin">here</a>.</p>
+
 
   </article>
  <div id="disqus_thread"></div>
@@ -185,7 +181,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2017/07/31/drill-1.11-released/index.html b/blog/2017/07/31/drill-1.11-released/index.html
index 2d2d6dc..fc361db 100644
--- a/blog/2017/07/31/drill-1.11-released/index.html
+++ b/blog/2017/07/31/drill-1.11-released/index.html
@@ -137,64 +137,60 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.11.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.11.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="cryptography-related-functions-drill-5634">Cryptography-Related Functions (DRILL-5634)</h2>
-
 <p>Drill provides the following cryptographic-related functions:</p>
 
 <ul>
-<li>aes_encrypt()</li>
-<li>aes_decrypt()</li>
-<li>md5()</li>
-<li>sha()</li>
-<li>sha1()</li>
-<li>sha2()<br></li>
+  <li>aes_encrypt()</li>
+  <li>aes_decrypt()</li>
+  <li>md5()</li>
+  <li>sha()</li>
+  <li>sha1()</li>
+  <li>sha2()</li>
 </ul>
 
 <h2 id="spill-to-disk-for-hash-aggregate-operator-drill-5457">Spill to Disk for Hash Aggregate Operator (DRILL-5457)</h2>
-
-<p>The Hash aggregate operator can spill data to disk in cases where the operation exceeds the set memory limit. Note that you may need to increase the default value of the <code>planner.memory.max_query_memory_per_node</code> option due to insufficient memory.      </p>
+<p>The Hash aggregate operator can spill data to disk in cases where the operation exceeds the set memory limit. Note that you may need to increase the default value of the <code class="language-plaintext highlighter-rouge">planner.memory.max_query_memory_per_node</code> option due to insufficient memory.</p>
 
 <h2 id="format-plugin-support-for-pcap-files-drill-5432">Format Plugin Support for PCAP Files (DRILL-5432)</h2>
+<p>A “pcap” format plugin enables Drill to read PCAP files. You must add the “pcap” format to the dfs storage plugin configuration, as shown:</p>
 
-<p>A “pcap” format plugin enables Drill to read PCAP files. You must add the “pcap” format to the dfs storage plugin configuration, as shown:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   &quot;pcap&quot;: {
-          &quot;type&quot;: &quot;pcap&quot;
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   "pcap": {
+          "type": "pcap"
         }   
-</code></pre></div>
-<h2 id="change-the-hdfs-block-size-for-parquet-files-drill-5379">Change the HDFS Block Size for Parquet Files (DRILL-5379)</h2>
+</code></pre></div></div>
 
-<p>The <code>store.parquet.writer.use_single_fs_block</code> option enables Drill to write a Parquet file as a single file system block without changing the file system default block size.</p>
+<h2 id="change-the-hdfs-block-size-for-parquet-files-drill-5379">Change the HDFS Block Size for Parquet Files (DRILL-5379)</h2>
+<p>The <code class="language-plaintext highlighter-rouge">store.parquet.writer.use_single_fs_block</code> option enables Drill to write a Parquet file as a single file system block without changing the file system default block size.</p>
 
 <h2 id="store-query-profiles-in-memory-drill-5481">Store Query Profiles in Memory (DRILL-5481)</h2>
-
-<p>The <code>drill.exec.profiles.store.inmemory</code> option enables Drill to store query profiles in memory instead of writing the query profiles to disk. The <code>drill.exec.profiles.store.capacity</code> option sets the maximum number of most recent profiles to retain in memory.  </p>
+<p>The <code class="language-plaintext highlighter-rouge">drill.exec.profiles.store.inmemory</code> option enables Drill to store query profiles in memory instead of writing the query profiles to disk. The <code class="language-plaintext highlighter-rouge">drill.exec.profiles.store.capacity</code> option sets the maximum number of most recent profiles to retain in memory.</p>
 
 <h2 id="configurable-ctas-directory-and-file-permissions-option-drill-5391">Configurable CTAS Directory and File Permissions Option (DRILL-5391)</h2>
-
-<p>You can use the <code>exec.persistent_table.umask</code> configuration option, at the system or session level, to modify permissions on directories and files that result from running the CTAS command. By default, the option is set to 002, which sets the default directory permissions to 775 and default file permissions to 664.   </p>
+<p>You can use the <code class="language-plaintext highlighter-rouge">exec.persistent_table.umask</code> configuration option, at the system or session level, to modify permissions on directories and files that result from running the CTAS command. By default, the option is set to 002, which sets the default directory permissions to 775 and default file permissions to 664.</p>
 
 <h2 id="support-for-network-encryption-drill-4335">Support for Network Encryption (DRILL-4335)</h2>
-
-<p>Drill can use SASL to support network encryption between the Drill client and drillbits, and also between drillbits.  </p>
+<p>Drill can use SASL to support network encryption between the Drill client and drillbits, and also between drillbits.</p>
 
 <h2 id="metadata-file-stores-relative-paths-drill-3867">Metadata file Stores Relative Paths (DRILL-3867)</h2>
-
-<p>Drill now stores the relative path in the metadata file (versus the absolute path), which enables you to move partitioned Parquet directories from one location in DFS to another without having to rebuild the Parquet metadata files; the metadata remains valid in the new location.  </p>
+<p>Drill now stores the relative path in the metadata file (versus the absolute path), which enables you to move partitioned Parquet directories from one location in DFS to another without having to rebuild the Parquet metadata files; the metadata remains valid in the new location.</p>
 
 <h2 id="support-for-additional-quoting-identifiers-drill-3510">Support for Additional Quoting Identifiers (DRILL-3510)</h2>
+<p>In addition to back ticks, the SQL parser in Drill can use double quotes and square brackets as identifier quotes. Use the <code class="language-plaintext highlighter-rouge">planner.parser.quoting_identifiers</code> configuration option, at the system or session level, to set the type of identifier quotes that the SQL parser in Drill uses, as shown:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   ALTER SESSION SET planner.parser.quoting_identifiers = '"';  
+   ALTER SESSION SET planner.parser.quoting_identifiers = '[';  
+   ALTER SESSION SET planner.parser.quoting_identifiers = '`';  
+</code></pre></div></div>
+
+<p>The default setting is back ticks. The quoting identifier used in queries must match the setting. If you use another type of quoting identifier, Drill returns an error.</p>
 
-<p>In addition to back ticks, the SQL parser in Drill can use double quotes and square brackets as identifier quotes. Use the <code>planner.parser.quoting_identifiers</code> configuration option, at the system or session level, to set the type of identifier quotes that the SQL parser in Drill uses, as shown:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   ALTER SESSION SET planner.parser.quoting_identifiers = &#39;&quot;&#39;;  
-   ALTER SESSION SET planner.parser.quoting_identifiers = &#39;[&#39;;  
-   ALTER SESSION SET planner.parser.quoting_identifiers = &#39;`&#39;;  
-</code></pre></div>
-<p>The default setting is back ticks. The quoting identifier used in queries must match the setting. If you use another type of quoting identifier, Drill returns an error.  </p>
+<p>You can find a complete list of JIRAs resolved in the 1.11.0 release <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;version=12339943">here</a>.</p>
 
-<p>You can find a complete list of JIRAs resolved in the 1.11.0 release <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12339943">here</a>.</p>
 
   </article>
  <div id="disqus_thread"></div>
@@ -218,7 +214,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2017/12/15/drill-1.12-released/index.html b/blog/2017/12/15/drill-1.12-released/index.html
index b277a5c..ed1695a 100644
--- a/blog/2017/12/15/drill-1.12-released/index.html
+++ b/blog/2017/12/15/drill-1.12-released/index.html
@@ -137,67 +137,65 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.12.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.12.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="kafka-and-opentsdb-storage-plugins-drill-4779-drill-5337">Kafka and OpenTSDB Storage Plugins (DRILL-4779, DRILL-5337)</h2>
-
-<p>You can configure Kafka and OpenTSDB as Drill data sources.  </p>
+<p>You can configure Kafka and OpenTSDB as Drill data sources.</p>
 
 <ul>
-<li>For Kafka storage plugin information, see the <a href="https://github.com/apache/drill/blob/master/contrib/storage-kafka/README.md">README</a> file. </li>
-<li><p>For OpenTSDB storage plugin information, see the <a href="https://github.com/apache/drill/blob/master/contrib/storage-opentsdb/README.md">README</a> file. Configure the OpenTSDB storage plugin in the Storage tab of the Drill Web Console, as shown:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">{
- &quot;storage&quot;: {
-   openTSDB: {
-     type: &quot;openTSDB&quot;,
-     connection: &quot;http://localhost:10000&quot;,
-     enabled: false
-      }
-   }
- }   
-</code></pre></div></li>
+  <li>For Kafka storage plugin information, see the <a href="https://github.com/apache/drill/blob/master/contrib/storage-kafka/README.md">README</a> file.</li>
+  <li>
+    <p>For OpenTSDB storage plugin information, see the <a href="https://github.com/apache/drill/blob/master/contrib/storage-opentsdb/README.md">README</a> file. Configure the OpenTSDB storage plugin in the Storage tab of the Drill Web Console, as shown:</p>
+
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>  {
+   "storage": {
+     openTSDB: {
+       type: "openTSDB",
+       connection: "http://localhost:10000",
+       enabled: false
+        }
+     }
+   }   
+</code></pre></div>    </div>
+  </li>
 </ul>
 
 <h2 id="queue-based-memory-assignment-for-buffering-operators-throttling-drill-5716">Queue-Based Memory Assignment for Buffering Operators (Throttling) (DRILL-5716)</h2>
-
-<p>Throttling limits the number of concurrent queries that run to prevent queries from failing with out-of-memory errors. When you enable throttling, you configure the number of concurrent queries that can run and the resource requirements for each query. Drill calculates the amount of memory to assign per query per node. See <a href="/docs/throttling/">Throttling</a> for more information. </p>
+<p>Throttling limits the number of concurrent queries that run to prevent queries from failing with out-of-memory errors. When you enable throttling, you configure the number of concurrent queries that can run and the resource requirements for each query. Drill calculates the amount of memory to assign per query per node. See <a href="/docs/throttling/">Throttling</a> for more information.</p>
 
 <h2 id="networking-functions">Networking Functions</h2>
-
-<p>Drill supports the following networking functions to facilitate network analysis using Drill:  </p>
+<p>Drill supports the following networking functions to facilitate network analysis using Drill:</p>
 
 <ul>
-<li><code>inet_aton(&lt;ip&gt;)</code>: Converts an IPv4 address into an integer</li>
-<li><code>inet_ntoa( &lt;int&gt;)</code>: Converts an integer IP into dotted decimal notation</li>
-<li><code>in_network( &lt;ip&gt;,&lt;cidr&gt; )</code>: Returns true if the IP address is in the given CIDR block</li>
-<li><code>address_count( &lt;cidr&gt; )</code>: Returns the number of IPs in a given CIDR block</li>
-<li><code>broadcast_address( &lt;cidr&gt; )</code>: Returns the broadcast address for a given CIDR block</li>
-<li><code>netmask(&lt;cidr&gt; )</code>: Returns the netmask for a given CIDR block</li>
-<li><code>low_address(&lt;cidr&gt;)</code>: Returns the first address in a given CIDR block</li>
-<li><code>high_address(&lt;cidr&gt;)</code>: Returns the last address in a given CIDR block</li>
-<li><code>url_encode( &lt;url&gt; )</code>: Returns a URL encoded string</li>
-<li><code>url_decode( &lt;url&gt; )</code>: Decodes ``a URL encoded string</li>
-<li><code>is_valid_IP(&lt;ip&gt;)</code>: Returns true if the IP is a valid IP address</li>
-<li><code>is_private_ip(&lt;ip&gt;)</code>: Returns true if the IP is a private IPv4 address</li>
-<li><code>is_valid_IPv4(&lt;ip&gt;)</code>: Returns true if the IP is a valid IPv4 address</li>
-<li><code>is_valid_IPv6(&lt;ip&gt;)</code>: Returns true if the IP is a valid IPv6 address<br></li>
+  <li><code class="language-plaintext highlighter-rouge">inet_aton(&lt;ip&gt;)</code>: Converts an IPv4 address into an integer</li>
+  <li><code class="language-plaintext highlighter-rouge">inet_ntoa( &lt;int&gt;)</code>: Converts an integer IP into dotted decimal notation</li>
+  <li><code class="language-plaintext highlighter-rouge">in_network( &lt;ip&gt;,&lt;cidr&gt; )</code>: Returns true if the IP address is in the given CIDR block</li>
+  <li><code class="language-plaintext highlighter-rouge">address_count( &lt;cidr&gt; )</code>: Returns the number of IPs in a given CIDR block</li>
+  <li><code class="language-plaintext highlighter-rouge">broadcast_address( &lt;cidr&gt; )</code>: Returns the broadcast address for a given CIDR block</li>
+  <li><code class="language-plaintext highlighter-rouge">netmask(&lt;cidr&gt; )</code>: Returns the netmask for a given CIDR block</li>
+  <li><code class="language-plaintext highlighter-rouge">low_address(&lt;cidr&gt;)</code>: Returns the first address in a given CIDR block</li>
+  <li><code class="language-plaintext highlighter-rouge">high_address(&lt;cidr&gt;)</code>: Returns the last address in a given CIDR block</li>
+  <li><code class="language-plaintext highlighter-rouge">url_encode( &lt;url&gt; )</code>: Returns a URL encoded string</li>
+  <li><code class="language-plaintext highlighter-rouge">url_decode( &lt;url&gt; )</code>: Decodes ``a URL encoded string</li>
+  <li><code class="language-plaintext highlighter-rouge">is_valid_IP(&lt;ip&gt;)</code>: Returns true if the IP is a valid IP address</li>
+  <li><code class="language-plaintext highlighter-rouge">is_private_ip(&lt;ip&gt;)</code>: Returns true if the IP is a private IPv4 address</li>
+  <li><code class="language-plaintext highlighter-rouge">is_valid_IPv4(&lt;ip&gt;)</code>: Returns true if the IP is a valid IPv4 address</li>
+  <li><code class="language-plaintext highlighter-rouge">is_valid_IPv6(&lt;ip&gt;)</code>: Returns true if the IP is a valid IPv6 address</li>
 </ul>
 
 <h2 id="ssl-support">SSL Support</h2>
-
-<p>Drill supports SSL to encrypt data passed between the Drill client and Drillbit. SSL also provides one-way authentication through which the Drill client verifies the identity of the Drillbit. The SASL feature in Drill provides authentication and an option to encrypt data, however the encryption feature is not available when using Plain authentication. If you need to use Plain authentication (certain BI tools only use Plain authentication), you can enable SSL to encrypt data. You can h [...]
+<p>Drill supports SSL to encrypt data passed between the Drill client and Drillbit. SSL also provides one-way authentication through which the Drill client verifies the identity of the Drillbit. The SASL feature in Drill provides authentication and an option to encrypt data, however the encryption feature is not available when using Plain authentication. If you need to use Plain authentication (certain BI tools only use Plain authentication), you can enable SSL to encrypt data. You can h [...]
 
 <h2 id="network-encryption-support">Network Encryption Support</h2>
-
-<p>Drill 1.10 provided authentication support through Plain and Kerberos authentication mechanisms to authenticate the Drill client to Drillbit and Drillbit to Drillbit communication channels. Drill 1.11 extends that support to include encryption. Drill uses the Kerberos mechanism over the SASL framework to encrypt the communication channels. </p>
+<p>Drill 1.10 provided authentication support through Plain and Kerberos authentication mechanisms to authenticate the Drill client to Drillbit and Drillbit to Drillbit communication channels. Drill 1.11 extends that support to include encryption. Drill uses the Kerberos mechanism over the SASL framework to encrypt the communication channels.</p>
 
 <h2 id="access-to-paths-outside-the-current-workspace-drill-5964">Access to Paths Outside the Current Workspace (DRILL-5964)</h2>
-
 <p>A new parameter, allowAccessOutsideWorkspace, in the dfs storage plugin configuration prevents users from accessing paths outside the root of a workspace. The default value for the parameter is false. Set the parameter to true to allow users access outside of a workspace. If existing storage plugin configurations do not specify the parameter, users cannot access paths outside the configured workspaces.</p>
 
-<p>You can find a complete list of JIRAs resolved in the 1.12.0 release <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12341087&styleName=Html&projectId=12313820&Create=Create&atl_token=A5KQ-2QAV-T4JA-FDED%7Cd194b12b906cd370f36d15e8af60a94592b89038%7Clin">here</a>.</p>
+<p>You can find a complete list of JIRAs resolved in the 1.12.0 release <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12341087&amp;styleName=Html&amp;projectId=12313820&amp;Create=Create&amp;atl_token=A5KQ-2QAV-T4JA-FDED%7Cd194b12b906cd370f36d15e8af60a94592b89038%7Clin">here</a>.</p>
+
 
   </article>
  <div id="disqus_thread"></div>
@@ -221,7 +219,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2018/02/09/running-sql-queries-on-amazon-s3/index.html b/blog/2018/02/09/running-sql-queries-on-amazon-s3/index.html
index 3cb980e..b0782f6 100644
--- a/blog/2018/02/09/running-sql-queries-on-amazon-s3/index.html
+++ b/blog/2018/02/09/running-sql-queries-on-amazon-s3/index.html
@@ -137,17 +137,17 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>The functionality and sheer usefulness of Drill is growing fast.  If you&#39;re a user of some of the popular BI tools out there like Tableau or SAP Lumira, now is a good time to take a look at how Drill can make your life easier, especially if  you&#39;re faced with the task of quickly getting a handle on large sets of unstructured data.  With schema generated on the fly, you can save a lot of time and headaches by running SQL queries on the data where it rests without knowing mu [...]
+    <p>The functionality and sheer usefulness of Drill is growing fast.  If you’re a user of some of the popular BI tools out there like Tableau or SAP Lumira, now is a good time to take a look at how Drill can make your life easier, especially if  you’re faced with the task of quickly getting a handle on large sets of unstructured data.  With schema generated on the fly, you can save a lot of time and headaches by running SQL queries on the data where it rests without knowing much about [...]
 
-<p>If you&#39;re more of a visual person, you can skip this article entirely and <a href="https://www.youtube.com/watch?v=w8gZ2nn_ZUQ">go straight to a video</a> I put together that walks through an end-to-end example with Tableau.  This example is easily extended to other BI tools, as the steps are identical on the Drill side.</p>
+<p>If you’re more of a visual person, you can skip this article entirely and <a href="https://www.youtube.com/watch?v=w8gZ2nn_ZUQ">go straight to a video</a> I put together that walks through an end-to-end example with Tableau.  This example is easily extended to other BI tools, as the steps are identical on the Drill side.</p>
 
 <p>At a high level, configuring Drill to access S3 bucket data is accomplished with the following steps on each node running a drillbit.</p>
 
 <ul>
-<li>Download and install the <a href="http://www.jets3t.org/">JetS3t</a> JAR files and enable them.</li>
-<li>Add your S3 credentials in the relevant XML configuration file.</li>
-<li>Configure and enable the S3 storage plugin through the Drill web interface.</li>
-<li>Connect your BI tool of choice and query away.</li>
+  <li>Download and install the <a href="http://www.jets3t.org/">JetS3t</a> JAR files and enable them.</li>
+  <li>Add your S3 credentials in the relevant XML configuration file.</li>
+  <li>Configure and enable the S3 storage plugin through the Drill web interface.</li>
+  <li>Connect your BI tool of choice and query away.</li>
 </ul>
 
 <p>Consult the <a href="https://cwiki.apache.org/confluence/display/DRILL/Architectural+Overview">Architectural Overview</a> for a refresher on the architecture of Drill.</p>
@@ -159,16 +159,21 @@
 <h2 id="configuration-steps">Configuration Steps</h2>
 
 <p>To connect Drill to S3, all of the drillbit nodes will need to access code in the JetS3t library developed by Amazon.  As of this writing, 0.9.2 is the latest version but you might want to check <a href="https://jets3t.s3.amazonaws.com/toolkit/toolkit.html">the main page</a> to see if anything has been updated.  Be sure to get version 0.9.2 or later as earlier versions have a bug relating to reading Parquet data.</p>
-<div class="highlight"><pre><code class="language-bash" data-lang="bash">wget http://bitbucket.org/jmurty/jets3t/downloads/jets3t-0.9.2.zip
-cp jets3t-0.9.2/jars/jets3t-0.9.2.jar <span class="nv">$DRILL_HOME</span>/jars/3rdparty
-</code></pre></div>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>wget http://bitbucket.org/jmurty/jets3t/downloads/jets3t-0.9.2.zip
+<span class="nb">cp </span>jets3t-0.9.2/jars/jets3t-0.9.2.jar <span class="nv">$DRILL_HOME</span>/jars/3rdparty
+</code></pre></div></div>
+
 <p>Next, enable the plugin by editing the file:</p>
-<div class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$DRILL_HOME</span>/bin/hadoop_excludes.txt
-</code></pre></div>
-<p>and removing the line <code>jets3t</code>.</p>
 
-<p>Drill will need to know your S3 credentials in order to access data there. These credentials will need to be placed in the core-site.xml file for your installation.  If you already have a core-site.xml file configured for your environment, add the following parameters to it, otherwise create the file from scratch.  If you do end up creating it from scratch you will need to wrap these parameters with <code>&lt;configuration&gt;</code> and <code>&lt;/configuration&gt;</code>.</p>
-<div class="highlight"><pre><code class="language-xml" data-lang="xml"><span class="nt">&lt;property&gt;</span>
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$DRILL_HOME</span>/bin/hadoop_excludes.txt
+</code></pre></div></div>
+
+<p>and removing the line <code class="language-plaintext highlighter-rouge">jets3t</code>.</p>
+
+<p>Drill will need to know your S3 credentials in order to access data there. These credentials will need to be placed in the core-site.xml file for your installation.  If you already have a core-site.xml file configured for your environment, add the following parameters to it, otherwise create the file from scratch.  If you do end up creating it from scratch you will need to wrap these parameters with <code class="language-plaintext highlighter-rouge">&lt;configuration&gt;</code> and <c [...]
+
+<div class="language-xml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nt">&lt;property&gt;</span>
   <span class="nt">&lt;name&gt;</span>fs.s3.awsAccessKeyId<span class="nt">&lt;/name&gt;</span>
   <span class="nt">&lt;value&gt;</span>ID<span class="nt">&lt;/value&gt;</span>
 <span class="nt">&lt;/property&gt;</span>
@@ -187,27 +192,28 @@ cp jets3t-0.9.2/jars/jets3t-0.9.2.jar <span class="nv">$DRILL_HOME</span>/jars/3
   <span class="nt">&lt;name&gt;</span>fs.s3n.awsSecretAccessKey<span class="nt">&lt;/name&gt;</span>
   <span class="nt">&lt;value&gt;</span>SECRET<span class="nt">&lt;/value&gt;</span>
 <span class="nt">&lt;/property&gt;</span>
-</code></pre></div>
+</code></pre></div></div>
+
 <p>The steps so far give Drill enough information to connect to the S3 service.  Remember, you have to do this on all the nodes running drillbit.</p>
 
-<p>Next, let&#39;s go into the Drill web interface and enable the S3 storage plugin.  In this case you only need to connect to <strong>one</strong> of the nodes because Drill&#39;s configuration is synchronized across the cluster.  Complete the following steps:</p>
+<p>Next, let’s go into the Drill web interface and enable the S3 storage plugin.  In this case you only need to connect to <strong>one</strong> of the nodes because Drill’s configuration is synchronized across the cluster.  Complete the following steps:</p>
 
 <ol>
-<li>Point your browser to <code>http://&lt;host&gt;:8047</code></li>
-<li>Select the &#39;Storage&#39; tab.</li>
-<li>A good starting configuration for S3 can be entirely the same as the <code>dfs</code> plugin, except the connection parameter is changed to <code>s3n://bucket</code>.  So first select the <code>Update</code> button for <code>dfs</code>, then select the text area and copy it into the clipboard (on Windows, ctrl-A, ctrl-C works).</li>
-<li>Press <code>Back</code>, then create a new plugin by typing the name into the <code>New Storage Plugin</code>, then press <code>Create</code>.  You can choose any name, but a good convention is to use <code>s3-&lt;bucketname&gt;</code> so you can easily identify it later.</li>
-<li>In the configuration area, paste the configuration you just grabbed from &#39;dfs&#39;.  Change the line <code>connection: &quot;file:///&quot;</code> to <code>connection: &quot;s3n://&lt;bucket&gt;&quot;</code>.</li>
-<li>Click <code>Update</code>.  You should see a message that indicates success.</li>
+  <li>Point your browser to <code class="language-plaintext highlighter-rouge">http://&lt;host&gt;:8047</code></li>
+  <li>Select the ‘Storage’ tab.</li>
+  <li>A good starting configuration for S3 can be entirely the same as the <code class="language-plaintext highlighter-rouge">dfs</code> plugin, except the connection parameter is changed to <code class="language-plaintext highlighter-rouge">s3n://bucket</code>.  So first select the <code class="language-plaintext highlighter-rouge">Update</code> button for <code class="language-plaintext highlighter-rouge">dfs</code>, then select the text area and copy it into the clipboard (on Windows, [...]
+  <li>Press <code class="language-plaintext highlighter-rouge">Back</code>, then create a new plugin by typing the name into the <code class="language-plaintext highlighter-rouge">New Storage Plugin</code>, then press <code class="language-plaintext highlighter-rouge">Create</code>.  You can choose any name, but a good convention is to use <code class="language-plaintext highlighter-rouge">s3-&lt;bucketname&gt;</code> so you can easily identify it later.</li>
+  <li>In the configuration area, paste the configuration you just grabbed from ‘dfs’.  Change the line <code class="language-plaintext highlighter-rouge">connection: "file:///"</code> to <code class="language-plaintext highlighter-rouge">connection: "s3n://&lt;bucket&gt;"</code>.</li>
+  <li>Click <code class="language-plaintext highlighter-rouge">Update</code>.  You should see a message that indicates success.</li>
 </ol>
 
-<p>Note: Make sure the URI has scheme &quot;s3n&quot;, not &quot;s3&quot;. It will not work with &quot;s3&quot;.</p>
+<p>Note: Make sure the URI has scheme “s3n”, not “s3”. It will not work with “s3”.</p>
 
 <p>At this point you can run queries on the data directly and you have a couple of options on how you want to access it.  You can use Drill Explorer and create a custom view (based on an SQL query) that you can then access in Tableau or other BI tools, or just use Drill directly from within the tool.</p>
 
 <p>You may want to check out the <a href="http://www.youtube.com/watch?v=jNUsprJNQUg">Tableau demo</a>.</p>
 
-<p>With just a few lines of configuration, you&#39;ve just opened the vast world of data available in the Amazon cloud and reduced the amount of work you have to do in advance to access data stored there with SQL.  There are even some <a href="https://aws.amazon.com/datasets">public datasets</a> available directly on S3 that are great for experimentation.</p>
+<p>With just a few lines of configuration, you’ve just opened the vast world of data available in the Amazon cloud and reduced the amount of work you have to do in advance to access data stored there with SQL.  There are even some <a href="https://aws.amazon.com/datasets">public datasets</a> available directly on S3 that are great for experimentation.</p>
 
 <p>Happy Drilling!</p>
 
@@ -233,7 +239,7 @@ cp jets3t-0.9.2/jars/jets3t-0.9.2.jar <span class="nv">$DRILL_HOME</span>/jars/3
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2018/03/18/drill-1.13-released/index.html b/blog/2018/03/18/drill-1.13-released/index.html
index 1c5af44..3a026ee 100644
--- a/blog/2018/03/18/drill-1.13-released/index.html
+++ b/blog/2018/03/18/drill-1.13-released/index.html
@@ -137,36 +137,35 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.13.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.13.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="ability-to-run-drill-under-yarn-drill-1170">Ability to Run Drill Under YARN (DRILL-1170)</h2>
-
 <p>You can run Drill as a YARN application (<a href="/docs/drill-on-yarn/">Drill-on-YARN</a>) if you want Drill to work alongside other applications, such as Hadoop and Spark, in a YARN-managed cluster. YARN assigns resources, such as memory and CPU, to applications in the cluster and eliminates the manual steps associated with installation and resource allocation for stand-alone applications in a multi-tenant environment. YARN automatically deploys (localizes) the Drill software onto ea [...]
 
 <h2 id="spnego-support-drill-5425">SPNEGO Support (DRILL-5425)</h2>
 
-<p>You can use SPNEGO to extend Kerberos authentication to Web applications through HTTP. </p>
+<p>You can use SPNEGO to extend Kerberos authentication to Web applications through HTTP.</p>
 
 <h2 id="sql-syntax-support-drill-5868">SQL Syntax Support (DRILL-5868)</h2>
+<p>Query syntax appears highlighted in the Drill Web Console. In addition to syntax highlighting, auto-complete is supported in all SQL editors, including the Edit Query tab within an existing profile to rerun the query. For browsers like Chrome, you can type Ctrl+Space for a drop-down list and then use arrow keys for navigating through options. An auto-complete feature that specifies Drill keywords and functions, and the ability to write SQL from templates using snippets.</p>
 
-<p>Query syntax appears highlighted in the Drill Web Console. In addition to syntax highlighting, auto-complete is supported in all SQL editors, including the Edit Query tab within an existing profile to rerun the query. For browsers like Chrome, you can type Ctrl+Space for a drop-down list and then use arrow keys for navigating through options. An auto-complete feature that specifies Drill keywords and functions, and the ability to write SQL from templates using snippets. </p>
-
-<h2 id="user-distribution-specific-configuration-checks-during-startup-drill-5741">User/Distribution-Specific Configuration Checks During Startup (DRILL-5741)</h2>
+<h2 id="userdistribution-specific-configuration-checks-during-startup-drill-5741">User/Distribution-Specific Configuration Checks During Startup (DRILL-5741)</h2>
 
-<p>You can define the maximum amount of cumulative memory allocated to the Drill process during startup through the <code>DRILLBIT_MAX_PROC_MEM</code> environment variable. For example, if you set <code>DRILLBIT_MAX_PROC_MEM to 40G</code>, the total amount of memory allocated to the following memory parameters cannot exceed 40G:  </p>
+<p>You can define the maximum amount of cumulative memory allocated to the Drill process during startup through the <code class="language-plaintext highlighter-rouge">DRILLBIT_MAX_PROC_MEM</code> environment variable. For example, if you set <code class="language-plaintext highlighter-rouge">DRILLBIT_MAX_PROC_MEM to 40G</code>, the total amount of memory allocated to the following memory parameters cannot exceed 40G:</p>
 
 <ul>
-<li><code>DRILL_HEAP=8G</code> </li>
-<li><code>DRILL_MAX_DIRECT_MEMORY=10G</code><br></li>
-<li><code>DRILLBIT_CODE_CACHE_SIZE=1024M</code></li>
+  <li><code class="language-plaintext highlighter-rouge">DRILL_HEAP=8G </code></li>
+  <li><code class="language-plaintext highlighter-rouge">DRILL_MAX_DIRECT_MEMORY=10G</code></li>
+  <li><code class="language-plaintext highlighter-rouge">DRILLBIT_CODE_CACHE_SIZE=1024M</code></li>
 </ul>
 
-<p>At startup, an auto-setup.sh script performs a check to see if these memory parameters are declared. If the parameters are declared, the script performs a check to verify that the cumulative memory of the parameters does not exceed the value specified by <code>DRILLBIT_MAX_PROC_MEM</code>. If the cumulative memory exceeds the total amount of memory defined by <code>DRILLBIT_MAX_PROC_MEM</code>, Drill returns an error message with instructions.</p>
+<p>At startup, an auto-setup.sh script performs a check to see if these memory parameters are declared. If the parameters are declared, the script performs a check to verify that the cumulative memory of the parameters does not exceed the value specified by <code class="language-plaintext highlighter-rouge">DRILLBIT_MAX_PROC_MEM</code>. If the cumulative memory exceeds the total amount of memory defined by <code class="language-plaintext highlighter-rouge">DRILLBIT_MAX_PROC_MEM</code>, D [...]
 
 <p>You can find a complete list of JIRAs resolved in the 1.13.0 release <a href="/docs/apache-drill-1-13-0-release-notes/">here</a>.</p>
 
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -189,7 +188,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2018/08/05/drill-1.14-released/index.html b/blog/2018/08/05/drill-1.14-released/index.html
index 3a1f69e..3387fe2 100644
--- a/blog/2018/08/05/drill-1.14-released/index.html
+++ b/blog/2018/08/05/drill-1.14-released/index.html
@@ -137,33 +137,29 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.14.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.14.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
 <h2 id="run-drill-in-a-docker-container-drill-6346">Run Drill in a Docker Container (DRILL-6346)</h2>
-
-<p>Running Drill in a Docker container is the simplest way to start using Drill; all you need is the Docker client installed on your machine. You simply run a Docker command, and your Docker client downloads the Drill Docker image from the apache-drill repository on Docker Hub and then brings up a container with Apache Drill running in embedded mode. See <a href="/docs/running-drill-on-docker/">Running Drill on Docker</a>.  </p>
+<p>Running Drill in a Docker container is the simplest way to start using Drill; all you need is the Docker client installed on your machine. You simply run a Docker command, and your Docker client downloads the Drill Docker image from the apache-drill repository on Docker Hub and then brings up a container with Apache Drill running in embedded mode. See <a href="/docs/running-drill-on-docker/">Running Drill on Docker</a>.</p>
 
 <h2 id="export-and-save-storage-plugin-configurations-drill-4580">Export and Save Storage Plugin Configurations (DRILL-4580)</h2>
-
-<p>You can export and save your storage plugin configurations from the Storage page in the Drill Web UI. See <a href="/docs/configuring-storage-plugins/#exporting-storage-plugin-configurations">Exporting Storage Plugin Configurations</a>.  </p>
+<p>You can export and save your storage plugin configurations from the Storage page in the Drill Web UI. See <a href="/docs/configuring-storage-plugins/#exporting-storage-plugin-configurations">Exporting Storage Plugin Configurations</a>.</p>
 
 <h2 id="manage-storage-plugin-configurations-in-a-configuration-file-drill-6494">Manage Storage Plugin Configurations in a Configuration File (DRILL-6494)</h2>
-
-<p>You can manage storage plugin configurations in the Drill configuration file,  storage-plugins-override.conf. When you provide the storage plugin configurations in the storage-plugins-override.conf file, Drill reads the file and configures the plugins during start-up. See <a href="https://drill.apache.org/docs/configuring-storage-plugins/#configuring-storage-plugins-with-the-storage-plugins-override.conf-file">Configuring Storage Plugins with the storage-plugins-override.conf File</a>.  </p>
+<p>You can manage storage plugin configurations in the Drill configuration file,  storage-plugins-override.conf. When you provide the storage plugin configurations in the storage-plugins-override.conf file, Drill reads the file and configures the plugins during start-up. See <a href="https://drill.apache.org/docs/configuring-storage-plugins/#configuring-storage-plugins-with-the-storage-plugins-override.conf-file">Configuring Storage Plugins with the storage-plugins-override.conf File</a>.</p>
 
 <h2 id="query-metadata-in-various-image-formats-drill-4364">Query Metadata in Various Image Formats (DRILL-4364)</h2>
-
-<p>The metadata format plugin is useful for querying a large number of image files stored in a distributed file system. You do not have to build a metadata repository in advance.<br>
-See <a href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a>.  </p>
+<p>The metadata format plugin is useful for querying a large number of image files stored in a distributed file system. You do not have to build a metadata repository in advance.<br />
+See <a href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a>.</p>
 
 <h2 id="set-hive-properties-at-the-session-level-drill-6575">Set Hive Properties at the Session Level (DRILL-6575)</h2>
-
-<p>The store.hive.conf.properties option enables you to specify Hive properties at the session level using the SET command. See <a href="/docs/hive-storage-plugin/#setting-hive-properties">Setting Hive Properties</a>.   </p>
+<p>The store.hive.conf.properties option enables you to specify Hive properties at the session level using the SET command. See <a href="/docs/hive-storage-plugin/#setting-hive-properties">Setting Hive Properties</a>.</p>
 
 <p>You can find a complete list of JIRAs resolved in the 1.14.0 release <a href="/docs/apache-drill-1-14-0-release-notes/">here</a>.</p>
 
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -186,7 +182,7 @@ See <a href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2018/10/16/drill-developer-day/index.html b/blog/2018/10/16/drill-developer-day/index.html
index 3835256..9ad517b 100644
--- a/blog/2018/10/16/drill-developer-day/index.html
+++ b/blog/2018/10/16/drill-developer-day/index.html
@@ -138,6 +138,7 @@
 
   <article class="post-content">
     
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -160,7 +161,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2018/10/16/drill-user-meetup/index.html b/blog/2018/10/16/drill-user-meetup/index.html
index 2a35cda..c6b3d22 100644
--- a/blog/2018/10/16/drill-user-meetup/index.html
+++ b/blog/2018/10/16/drill-user-meetup/index.html
@@ -138,6 +138,7 @@
 
   <article class="post-content">
     
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -160,7 +161,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2018/12/31/drill-1.15-released/index.html b/blog/2018/12/31/drill-1.15-released/index.html
index d94fcb1..418efef 100644
--- a/blog/2018/12/31/drill-1.15-released/index.html
+++ b/blog/2018/12/31/drill-1.15-released/index.html
@@ -137,32 +137,28 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.15.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.15.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>The release provides the following bug fixes and improvements:</p>
 
-<h2 id="sqlline-upgrade-to-1-6-drill-3853">SQLLine upgrade to 1.6 (DRILL-3853)</h2>
-
+<h2 id="sqlline-upgrade-to-16-drill-3853">SQLLine upgrade to 1.6 (DRILL-3853)</h2>
 <p>An upgrade to SQLLine 1.6 includes additional commands and the ability to add a custom SQLLine configuration. See <a href="/docs/configuring-the-drill-shell/">Configuring the Drill Shell</a>.</p>
 
 <h2 id="index-support-drill-6381">Index support (DRILL-6381)</h2>
-
 <p>Drill can leverage indexes (primary or secondary) in data sources to create index-based query plans. An index-based query plan leverages indexes to access date instead of full table scans. Currently, Drill only supports indexes for the MapR-DB storage plugin. See <a href="/docs/querying-indexes/">Querying Indexes</a>.</p>
 
 <h2 id="ability-to-create-custom-acls-to-secure-znodes-drill-5671">Ability to create custom ACLs to secure znodes (DRILL-5671)</h2>
-
 <p>Drill uses ZooKeeper to store certain cluster-level configuration and query profile information in znodes. A znode is an internal data tree in ZooKeeper that stores coordination and execution related information. Starting in Drill 1.15, you can create a custom ACL (Access Control List) on the znodes to secure data. See <a href="/docs/configuring-custom-acls-to-secure-znodes/">Configuring Custom ACLs to Secure znodes</a>.</p>
 
 <h2 id="information_schema-files-table-drill-6680">INFORMATION_SCHEMA FILES table (DRILL-6680)</h2>
-
-<p>The INFORMATION_SCHEMA contains a FILES table that you can query for information about directories and files stored in the workspaces configured within your S3 and file system storage plugin configurations. See <a href="https://drill.apache.org/docs/querying-the-information-schema/#files">INFORMATION_SCHEMA</a>. </p>
+<p>The INFORMATION_SCHEMA contains a FILES table that you can query for information about directories and files stored in the workspaces configured within your S3 and file system storage plugin configurations. See <a href="https://drill.apache.org/docs/querying-the-information-schema/#files">INFORMATION_SCHEMA</a>.</p>
 
 <h2 id="system-functions-table-drill-3988">System functions table (DRILL-3988)</h2>
-
-<p>You can query the system functions table exposes the available SQL functions in Drill and also detects UDFs that have been dynamically loaded into Drill. See <a href="https://drill.apache.org/docs/querying-system-tables/#querying-the-functions-table">Querying the functions table</a>.  </p>
+<p>You can query the system functions table exposes the available SQL functions in Drill and also detects UDFs that have been dynamically loaded into Drill. See <a href="https://drill.apache.org/docs/querying-system-tables/#querying-the-functions-table">Querying the functions table</a>.</p>
 
 <p>You can find a complete list of improvements and JIRAs resolved in the 1.15.0 release <a href="/docs/apache-drill-1-15-0-release-notes/">here</a>.</p>
 
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -185,7 +181,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2019/05/02/drill-1.16-released/index.html b/blog/2019/05/02/drill-1.16-released/index.html
index 8b19187..c1d2bb3 100644
--- a/blog/2019/05/02/drill-1.16-released/index.html
+++ b/blog/2019/05/02/drill-1.16-released/index.html
@@ -137,34 +137,31 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.16.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+    <p>Today, we’re happy to announce the availability of Drill 1.16.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>This release provides the following bug fixes and improvements:</p>
 
 <h2 id="table-statistics">Table Statistics</h2>
-
-<p>The <a href="/docs/analyze-table/">ANALYZE TABLE statement</a>  computes statistics and generates histograms for numeric data types.   </p>
+<p>The <a href="/docs/analyze-table/">ANALYZE TABLE statement</a>  computes statistics and generates histograms for numeric data types.</p>
 
 <h2 id="schema-provisioning-for-text-files">Schema Provisioning for Text Files</h2>
-
-<p>The <a href="/docs/create-or-replace-schema/">CREATE OR REPLACE SCHEMA command</a> defines a schema for text files. (In Drill 1.16, this feature is in preview status.)  </p>
+<p>The <a href="/docs/create-or-replace-schema/">CREATE OR REPLACE SCHEMA command</a> defines a schema for text files. (In Drill 1.16, this feature is in preview status.)</p>
 
 <h2 id="parquet-metadata-caching-improvements">Parquet Metadata Caching Improvements</h2>
-
-<p>The <a href="/docs/refresh-table-metadata/">REFRESH TABLE METADATA command</a> can generate metadata cache files for specific columns. </p>
+<p>The <a href="/docs/refresh-table-metadata/">REFRESH TABLE METADATA command</a> can generate metadata cache files for specific columns.</p>
 
 <h2 id="drill-web-ui-enhancements">Drill Web UI Enhancements</h2>
-
-<p>Enhancements include:<br>
-    - <a href="https://drill.apache.org/docs/configuring-storage-plugins/#exporting-storage-plugin-configurations">Storage plugin management improvements</a><br>
-    - <a href="/docs/query-profiles/#query-profile-warnings">Query progress indicators and warnings </a><br>
-    - Ability to <a href="/docs/planning-and-execution-options/#setting-an-auto-limit-on-the-number-of-rows-returned-for-result-sets">limit the result size for better UI response</a><br>
-    - Ability to <a href="/docs/query-profiles/#viewing-a-query-profile">sort the list of profiles in the Drill Web UI</a><br>
-    - <a href="/docs/starting-the-web-ui/#running-queries-from-the-web-ui">Display query state in query result page</a><br>
-    - <a href="https://drill.apache.org/docs/planning-and-execution-options/#setting-options-from-the-drill-web-ui">Button to reset the options filter</a>   </p>
+<p>Enhancements include: <br />
+	- <a href="https://drill.apache.org/docs/configuring-storage-plugins/#exporting-storage-plugin-configurations">Storage plugin management improvements</a><br />
+	- <a href="/docs/query-profiles/#query-profile-warnings">Query progress indicators and warnings </a> <br />
+	- Ability to <a href="/docs/planning-and-execution-options/#setting-an-auto-limit-on-the-number-of-rows-returned-for-result-sets">limit the result size for better UI response</a> <br />
+	- Ability to <a href="/docs/query-profiles/#viewing-a-query-profile">sort the list of profiles in the Drill Web UI</a> <br />
+	- <a href="/docs/starting-the-web-ui/#running-queries-from-the-web-ui">Display query state in query result page</a> <br />
+	- <a href="https://drill.apache.org/docs/planning-and-execution-options/#setting-options-from-the-drill-web-ui">Button to reset the options filter</a></p>
 
 <p>You can find a complete list of improvements and JIRAs resolved in the 1.16.0 release <a href="/docs/apache-drill-1-16-0-release-notes/">here</a>.</p>
 
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -187,7 +184,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2019/05/02/drill-user-meetup/index.html b/blog/2019/05/02/drill-user-meetup/index.html
index a9f5e94..a6e0352 100644
--- a/blog/2019/05/02/drill-user-meetup/index.html
+++ b/blog/2019/05/02/drill-user-meetup/index.html
@@ -138,6 +138,7 @@
 
   <article class="post-content">
     
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -160,7 +161,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2019/12/26/drill-1.17-released/index.html b/blog/2019/12/26/drill-1.17-released/index.html
index b783aab..f3e4b19 100644
--- a/blog/2019/12/26/drill-1.17-released/index.html
+++ b/blog/2019/12/26/drill-1.17-released/index.html
@@ -137,39 +137,37 @@
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.17.0. You can download it <a href="https://drill.apache.org/download/">here</a>.  </p>
+    <p>Today, we’re happy to announce the availability of Drill 1.17.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>This release provides the following bug fixes and improvements:</p>
 
 <h2 id="hive-complex-types-support">Hive complex types support:</h2>
-
 <ul>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7251'>DRILL-7251</a> - Read Hive array without nulls</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7252'>DRILL-7252</a> - Read Hive map using Dict<K,V> vector</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7253'>DRILL-7253</a> - Read Hive struct without nulls</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7254'>DRILL-7254</a> - Read Hive union without nulls</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7268'>DRILL-7268</a> - Read Hive array with parquet native reader</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7251">DRILL-7251</a> - Read Hive array without nulls</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7252">DRILL-7252</a> - Read Hive map using Dict&lt;K,V&gt; vector</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7253">DRILL-7253</a> - Read Hive struct without nulls</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7254">DRILL-7254</a> - Read Hive union without nulls</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7268">DRILL-7268</a> - Read Hive array with parquet native reader</li>
 </ul>
 
 <h2 id="new-format-plugins-support">New format plugins support:</h2>
-
 <ul>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-4303'>DRILL-4303</a> - ESRI Shapefile (shp) format plugin</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7177'>DRILL-7177</a> - Format Plugin for Excel Files</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-6096'>DRILL-6096</a> - Provide mechanisms to specify field delimiters and quoted text for TextRecordWriter<br></li>
-<li>Parquet format improvements, including runtime row group pruning (<a href='https://issues.apache.org/jira/browse/DRILL-7062'>DRILL-7062</a>), empty parquet creation (<a href='https://issues.apache.org/jira/browse/DRILL-7156'>DRILL-7156</a>), reading (<a href='https://issues.apache.org/jira/browse/DRILL-4517'>DRILL-4517</a>) support, and more.</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-4303">DRILL-4303</a> - ESRI Shapefile (shp) format plugin</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7177">DRILL-7177</a> - Format Plugin for Excel Files</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-6096">DRILL-6096</a> - Provide mechanisms to specify field delimiters and quoted text for TextRecordWriter</li>
+  <li>Parquet format improvements, including runtime row group pruning (<a href="https://issues.apache.org/jira/browse/DRILL-7062">DRILL-7062</a>), empty parquet creation (<a href="https://issues.apache.org/jira/browse/DRILL-7156">DRILL-7156</a>), reading (<a href="https://issues.apache.org/jira/browse/DRILL-4517">DRILL-4517</a>) support, and more.</li>
 </ul>
 
 <h2 id="metastore-support">Metastore support:</h2>
-
 <ul>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7272'>DRILL-7272</a> - Implement Drill Iceberg Metastore plugin</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7273'>DRILL-7273</a> - Create operator for handling metadata</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7357'>DRILL-7357</a> - Expose Drill Metastore data through INFORMATION_SCHEMA</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7272">DRILL-7272</a> - Implement Drill Iceberg Metastore plugin</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7273">DRILL-7273</a> - Create operator for handling metadata</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7357">DRILL-7357</a> - Expose Drill Metastore data through INFORMATION_SCHEMA</li>
 </ul>
 
 <p>You can find a complete list of improvements and JIRAs resolved in the 1.17.0 release <a href="/docs/apache-drill-1-17-0-release-notes/">here</a>.</p>
 
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -192,7 +190,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2019/12/26/drill-1.17-released/index.html b/blog/2020/09/04/drill-1.18-released/index.html
similarity index 75%
copy from blog/2019/12/26/drill-1.17-released/index.html
copy to blog/2020/09/04/drill-1.18-released/index.html
index b783aab..b445963 100644
--- a/blog/2019/12/26/drill-1.17-released/index.html
+++ b/blog/2020/09/04/drill-1.18-released/index.html
@@ -7,7 +7,7 @@
 <meta name=viewport content="width=device-width, initial-scale=1">
 
 
-<title>Drill 1.17 Released - Apache Drill</title>
+<title>Drill 1.18 Released - Apache Drill</title>
 
 <link href="//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet" type="text/css"/>
 <link href='//fonts.googleapis.com/css?family=PT+Sans' rel='stylesheet' type='text/css'/>
@@ -123,7 +123,7 @@
 <div class="post int_text">
   <header class="post-header">
     <div class="int_title">
-      <h1 class="post-title">Drill 1.17 Released</h1>
+      <h1 class="post-title">Drill 1.18 Released</h1>
     </div>
     <p class="post-meta">
     
@@ -131,44 +131,42 @@
       
       <strong>Author:</strong> Bridget Bevens (Committer, MapR Technologies)<br />
     
-<strong>Date:</strong> Dec 26, 2019
+<strong>Date:</strong> Sep 4, 2020
 </p>
   </header>
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    <p>Today, we&#39;re happy to announce the availability of Drill 1.17.0. You can download it <a href="https://drill.apache.org/download/">here</a>.  </p>
+    <p>Today, we’re happy to announce the availability of Drill 1.18.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
 
 <p>This release provides the following bug fixes and improvements:</p>
 
 <h2 id="hive-complex-types-support">Hive complex types support:</h2>
-
 <ul>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7251'>DRILL-7251</a> - Read Hive array without nulls</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7252'>DRILL-7252</a> - Read Hive map using Dict<K,V> vector</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7253'>DRILL-7253</a> - Read Hive struct without nulls</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7254'>DRILL-7254</a> - Read Hive union without nulls</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7268'>DRILL-7268</a> - Read Hive array with parquet native reader</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7251">DRILL-7251</a> - Read Hive array without nulls</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7252">DRILL-7252</a> - Read Hive map using Dict&lt;K,V&gt; vector</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7253">DRILL-7253</a> - Read Hive struct without nulls</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7254">DRILL-7254</a> - Read Hive union without nulls</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7268">DRILL-7268</a> - Read Hive array with parquet native reader</li>
 </ul>
 
 <h2 id="new-format-plugins-support">New format plugins support:</h2>
-
 <ul>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-4303'>DRILL-4303</a> - ESRI Shapefile (shp) format plugin</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7177'>DRILL-7177</a> - Format Plugin for Excel Files</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-6096'>DRILL-6096</a> - Provide mechanisms to specify field delimiters and quoted text for TextRecordWriter<br></li>
-<li>Parquet format improvements, including runtime row group pruning (<a href='https://issues.apache.org/jira/browse/DRILL-7062'>DRILL-7062</a>), empty parquet creation (<a href='https://issues.apache.org/jira/browse/DRILL-7156'>DRILL-7156</a>), reading (<a href='https://issues.apache.org/jira/browse/DRILL-4517'>DRILL-4517</a>) support, and more.</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-4303">DRILL-4303</a> - ESRI Shapefile (shp) format plugin</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7177">DRILL-7177</a> - Format Plugin for Excel Files</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-6096">DRILL-6096</a> - Provide mechanisms to specify field delimiters and quoted text for TextRecordWriter</li>
+  <li>Parquet format improvements, including runtime row group pruning (<a href="https://issues.apache.org/jira/browse/DRILL-7062">DRILL-7062</a>), empty parquet creation (<a href="https://issues.apache.org/jira/browse/DRILL-7156">DRILL-7156</a>), reading (<a href="https://issues.apache.org/jira/browse/DRILL-4517">DRILL-4517</a>) support, and more.</li>
 </ul>
 
 <h2 id="metastore-support">Metastore support:</h2>
-
 <ul>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7272'>DRILL-7272</a> - Implement Drill Iceberg Metastore plugin</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7273'>DRILL-7273</a> - Create operator for handling metadata</li>
-<li><a href='https://issues.apache.org/jira/browse/DRILL-7357'>DRILL-7357</a> - Expose Drill Metastore data through INFORMATION_SCHEMA</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7272">DRILL-7272</a> - Implement Drill Iceberg Metastore plugin</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7273">DRILL-7273</a> - Create operator for handling metadata</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7357">DRILL-7357</a> - Expose Drill Metastore data through INFORMATION_SCHEMA</li>
 </ul>
 
-<p>You can find a complete list of improvements and JIRAs resolved in the 1.17.0 release <a href="/docs/apache-drill-1-17-0-release-notes/">here</a>.</p>
+<p>You can find a complete list of improvements and JIRAs resolved in the 1.18.0 release <a href="/docs/apache-drill-1-18-0-release-notes/">here</a>.</p>
+
 
   </article>
  <div id="disqus_thread"></div>
@@ -192,7 +190,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/2018/10/16/drill-user-meetup/index.html b/blog/2020/09/05/drill-1.18-released/index.html
similarity index 84%
copy from blog/2018/10/16/drill-user-meetup/index.html
copy to blog/2020/09/05/drill-1.18-released/index.html
index 2a35cda..36eea0e 100644
--- a/blog/2018/10/16/drill-user-meetup/index.html
+++ b/blog/2020/09/05/drill-1.18-released/index.html
@@ -7,7 +7,7 @@
 <meta name=viewport content="width=device-width, initial-scale=1">
 
 
-<title>Drill User Meetup 2018 - Apache Drill</title>
+<title>Drill 1.18 Released - Apache Drill</title>
 
 <link href="//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet" type="text/css"/>
 <link href='//fonts.googleapis.com/css?family=PT+Sans' rel='stylesheet' type='text/css'/>
@@ -123,21 +123,33 @@
 <div class="post int_text">
   <header class="post-header">
     <div class="int_title">
-      <h1 class="post-title">Drill User Meetup 2018</h1>
+      <h1 class="post-title">Drill 1.18 Released</h1>
     </div>
     <p class="post-meta">
     
       
       
-      <strong>Author:</strong> Bridget Bevens (Committer, MapR Technologies)<br />
+      <strong>Author:</strong> Abhishek Girish (Committer, MapR Technologies)<br />
     
-<strong>Date:</strong> Oct 16, 2018
+<strong>Date:</strong> Sep 5, 2020
 </p>
   </header>
   <div class="addthis_sharing_toolbox"></div>
 
   <article class="post-content">
-    
+    <p>Today, we’re happy to announce the availability of Drill 1.18.0. You can download it <a href="https://drill.apache.org/download/">here</a>.</p>
+
+<h2 id="this-release-provides-the-following-new-features">This release provides the following new Features:</h2>
+
+<ul>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-6835">DRILL-6835</a> - Schema Provision using File / Table Function</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7326">DRILL-7326</a> - Support repeated lists for CTAS parquet format</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7343">DRILL-7343</a> - Add User-Agent UDFs to Drill</li>
+  <li><a href="https://issues.apache.org/jira/browse/DRILL-7374">DRILL-7374</a> - Support for IPV6 address</li>
+</ul>
+
+<p>You can find a complete list of improvements and JIRAs resolved in the 1.18.0 release <a href="/docs/apache-drill-1-18-0-release-notes/">here</a>.</p>
+
   </article>
  <div id="disqus_thread"></div>
     <script type="text/javascript">
@@ -160,7 +172,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/blog/index.html b/blog/index.html
index 5dfee99..7e28891 100644
--- a/blog/index.html
+++ b/blog/index.html
@@ -124,6 +124,16 @@
 </div>
 
 <div class="int_text" align="left"><!-- previously: site.posts -->
+<p><a class="post-link" href="/blog/2020/09/05/drill-1.18-released/">Drill 1.18 Released</a><br/>
+<span class="post-date">Posted on Sep 5, 2020
+by Abhishek Girish</span>
+<br/>Apache Drill 1.18 has been released providing Drill Metadata management "Drill Metastore", Format Plugins for HDF5 and SPSS, Storage Plugins for Generic HTTP REST APIs and Apache Druid, Support for DICT type in RowSet Framework, Dynamic credit-based flow control, Support for injecting BufferManager into UDF, and Drill RDBMS Metastore.</p>
+<!-- previously: site.posts -->
+<p><a class="post-link" href="/blog/2020/09/04/drill-1.18-released/">Drill 1.18 Released</a><br/>
+<span class="post-date">Posted on Sep 4, 2020
+by Bridget Bevens</span>
+<br/>Apache Drill 1.18's highlights are&#58; Hadoop, Kafka, Sqlline, and Calcite upgrades, .zip compression, file/table function for schema provisioning, new UDFs, and more.</p>
+<!-- previously: site.posts -->
 <p><a class="post-link" href="/blog/2019/12/26/drill-1.17-released/">Drill 1.17 Released</a><br/>
 <span class="post-date">Posted on Dec 26, 2019
 by Bridget Bevens</span>
@@ -306,7 +316,7 @@ by Anil Kumar Batchu, Kamesh Bhallamudi</span>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/community-resources/index.html b/community-resources/index.html
index 21d10be..e8417fb 100644
--- a/community-resources/index.html
+++ b/community-resources/index.html
@@ -126,36 +126,36 @@
 <div class="int_text" align="left"><h2 id="user-resources">User Resources</h2>
 
 <ul>
-<li><a href="/docs/">Documentation</a></li>
-<li>Twitter: <a href="https://twitter.com/ApacheDrill">@ApacheDrill</a></li>
-<li>Meetups:
-
-<ul>
-<li><a href="http://www.meetup.com/Bay-Area-Apache-Drill-User-Group/">Bay Area</a></li>
-<li><a href="http://www.meetup.com/New-York-Apache-Drill-Meetup/">New York</a></li>
-<li><a href="http://www.meetup.com/London-Apache-Drill/">London</a></li>
-<li><a href="http://drill.connpass.com/">Tokyo</a></li>
-</ul></li>
-<li>Presentations: <a href="http://www.slideshare.net/ApacheDrill/">SlideShare</a></li>
+  <li><a href="/docs/">Documentation</a></li>
+  <li>Twitter: <a href="https://twitter.com/ApacheDrill">@ApacheDrill</a></li>
+  <li>Meetups:
+    <ul>
+      <li><a href="http://www.meetup.com/Bay-Area-Apache-Drill-User-Group/">Bay Area</a></li>
+      <li><a href="http://www.meetup.com/New-York-Apache-Drill-Meetup/">New York</a></li>
+      <li><a href="http://www.meetup.com/London-Apache-Drill/">London</a></li>
+      <li><a href="http://drill.connpass.com/">Tokyo</a></li>
+    </ul>
+  </li>
+  <li>Presentations: <a href="http://www.slideshare.net/ApacheDrill/">SlideShare</a></li>
 </ul>
 
 <h2 id="developer-resources">Developer Resources</h2>
 
 <ul>
-<li>Issue tracker: <a href="https://issues.apache.org/jira/browse/DRILL/">JIRA</a></li>
-<li><a href="/docs/contribute-to-drill/">Contribute to Drill</a></li>
-<li>Hangout: A <a href="https://plus.google.com/hangouts/_/event/ci4rdiju8bv04a64efj5fedd0lc">bi-weekly Drill hangout</a> occurs every other Tuesday at 10 am PDT. For more information and hangout notes, see <a href="https://docs.google.com/document/d/1o2GvZUtJvKzN013JdM715ZBzhseT0VyZ9WgmLMeeUUk/edit?ts=5744c15c#heading=h.z8q6drmaybbj">Apache Drill Hangout Notes</a>.</li>
-<li>Source code: <a href="https://github.com/apache/drill">GitHub</a> </li>
+  <li>Issue tracker: <a href="https://issues.apache.org/jira/browse/DRILL/">JIRA</a></li>
+  <li><a href="/docs/contribute-to-drill/">Contribute to Drill</a></li>
+  <li>Hangout: A <a href="https://plus.google.com/hangouts/_/event/ci4rdiju8bv04a64efj5fedd0lc">bi-weekly Drill hangout</a> occurs every other Tuesday at 10 am PDT. For more information and hangout notes, see <a href="https://docs.google.com/document/d/1o2GvZUtJvKzN013JdM715ZBzhseT0VyZ9WgmLMeeUUk/edit?ts=5744c15c#heading=h.z8q6drmaybbj">Apache Drill Hangout Notes</a>.</li>
+  <li>Source code: <a href="https://github.com/apache/drill">GitHub</a></li>
 </ul>
 
 <h2 id="apache-software-foundation-resources">Apache Software Foundation Resources</h2>
 
 <ul>
-<li>Find out more about the Apache Software Foundation at <a href="http://www.apache.org/">http://www.apache.org/</a>.</li>
-<li>See the generous supporters of the Apache Software Foundation at <a href="http://www.apache.org/foundation/thanks.html">http://www.apache.org/foundation/thanks.html</a>.</li>
-<li>Find out how you can become a sponsor of the Apache Software Foundation at <a href="http://www.apache.org/foundation/sponsorship.html">http://www.apache.org/foundation/sponsorship.html</a>.</li>
-<li>Get Apache Software Foundation licensing information at <a href="http://www.apache.org/licenses/">http://www.apache.org/licenses/</a>.</li>
-<li>Report security vulnerabilities at <a href="http://www.apache.org/security/">http://www.apache.org/security/</a>. </li>
+  <li>Find out more about the Apache Software Foundation at <a href="http://www.apache.org/">http://www.apache.org/</a>.</li>
+  <li>See the generous supporters of the Apache Software Foundation at <a href="http://www.apache.org/foundation/thanks.html">http://www.apache.org/foundation/thanks.html</a>.</li>
+  <li>Find out how you can become a sponsor of the Apache Software Foundation at <a href="http://www.apache.org/foundation/sponsorship.html">http://www.apache.org/foundation/sponsorship.html</a>.</li>
+  <li>Get Apache Software Foundation licensing information at <a href="http://www.apache.org/licenses/">http://www.apache.org/licenses/</a>.</li>
+  <li>Report security vulnerabilities at <a href="http://www.apache.org/security/">http://www.apache.org/security/</a>.</li>
 </ul>
 </div>
 
@@ -163,7 +163,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/data/index.html b/data/index.html
index b1056cf..719dff9 100644
--- a/data/index.html
+++ b/data/index.html
@@ -372,12 +372,6 @@
     "relative_path": "_docs/connect-a-data-source/plugins/040-file-system-storage-plugin.md"
 },
 {
-    "url": "/docs/logfile-plugin/",
-    "title": "Logfile Plugin",
-    "parent": "Connect a Data Source",
-    "relative_path": "_docs/connect-a-data-source/plugins/041-logfile-plugin.md"
-},
-{
     "url": "/docs/hbase-storage-plugin/",
     "title": "HBase Storage Plugin",
     "parent": "Connect a Data Source",
@@ -414,12 +408,6 @@
     "relative_path": "_docs/connect-a-data-source/plugins/110-s3-storage-plugin.md"
 },
 {
-    "url": "/docs/httpd-format-plugin/",
-    "title": "HTTPD Format Plugin",
-    "parent": "Connect a Data Source",
-    "relative_path": "_docs/connect-a-data-source/plugins/111-httpd-format-plugin.md"
-},
-{
     "url": "/docs/opentsdb-storage-plugin/",
     "title": "OpenTSDB Storage Plugin",
     "parent": "Connect a Data Source",
@@ -432,28 +420,22 @@
     "relative_path": "_docs/connect-a-data-source/plugins/113-kafka-storage-plugin.md"
 },
 {
-    "url": "/docs/image-metadata-format-plugin/",
-    "title": "Image Metadata Format Plugin",
-    "parent": "Connect a Data Source",
-    "relative_path": "_docs/connect-a-data-source/plugins/114-image-metadata-format-plugin.md"
-},
-{
     "url": "/docs/azure-blob-storage-plugin/",
     "title": "Azure Blob Storage Plugin",
     "parent": "Connect a Data Source",
     "relative_path": "_docs/connect-a-data-source/plugins/115-azure-blob-storage-plugin.md"
 },
 {
-    "url": "/docs/syslog-format-plugin/",
-    "title": "Syslog Format Plugin",
+    "url": "/docs/druid-storage-plugin/",
+    "title": "Druid Storage Plugin",
     "parent": "Connect a Data Source",
-    "relative_path": "_docs/connect-a-data-source/plugins/116-sys-log-format-plugin.md"
+    "relative_path": "_docs/connect-a-data-source/plugins/120-druid-storage-plugin.md"
 },
 {
-    "url": "/docs/ltsv-format-plugin/",
-    "title": "LTSV Format Plugin",
+    "url": "/docs/http-storage-plugin/",
+    "title": "HTTP Storage Plugin",
     "parent": "Connect a Data Source",
-    "relative_path": "_docs/connect-a-data-source/plugins/117-ltsv-format-plugin.md"
+    "relative_path": "_docs/connect-a-data-source/plugins/125-http-storage-plugin.md"
 },
 {
     "url": "/docs/data-sources-and-file-formats-introduction/",
@@ -480,6 +462,12 @@
     "relative_path": "_docs/data-sources-and-file-formats/040-parquet-format.md"
 },
 {
+    "url": "/docs/logfile-plugin/",
+    "title": "Logfile Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/041-logfile-plugin.md"
+},
+{
     "url": "/docs/json-data-model/",
     "title": "JSON Data Model",
     "parent": "Data Sources and File Formats",
@@ -498,6 +486,54 @@
     "relative_path": "_docs/data-sources-and-file-formats/070-sequencefile-format.md"
 },
 {
+    "url": "/docs/httpd-format-plugin/",
+    "title": "HTTPD Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/110-httpd-format-plugin.md"
+},
+{
+    "url": "/docs/image-metadata-format-plugin/",
+    "title": "Image Metadata Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/111-image-metadata-format-plugin.md"
+},
+{
+    "url": "/docs/syslog-format-plugin/",
+    "title": "Syslog Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/112-sys-log-format-plugin.md"
+},
+{
+    "url": "/docs/ltsv-format-plugin/",
+    "title": "LTSV Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/113-ltsv-format-plugin.md"
+},
+{
+    "url": "/docs/spss-format-plugin/",
+    "title": "SPSS Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/114-spss-format-plugin.md"
+},
+{
+    "url": "/docs/esri-shapefile-format-plugin/",
+    "title": "ESRI Shapefile Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/115-esri-shapefile-format-plugin.md"
+},
+{
+    "url": "/docs/excel-format-plugin/",
+    "title": "Excel Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/116-excel-format-plugin.md"
+},
+{
+    "url": "/docs/hdf5-format-plugin/",
+    "title": "HDF5 Format Plugin",
+    "parent": "Data Sources and File Formats",
+    "relative_path": "_docs/data-sources-and-file-formats/117-hdf5-format-plugin.md"
+},
+{
     "url": "/docs/develop-custom-functions-introduction/",
     "title": "Develop Custom Functions Introduction",
     "parent": "Develop Custom Functions",
@@ -816,12 +852,6 @@
     "relative_path": "_docs/install/installing-drill-in-embedded-mode/010-embedded-mode-prerequisites.md"
 },
 {
-    "url": "/docs/running-drill-on-docker/",
-    "title": "Running Drill on Docker",
-    "parent": "Installing Drill in Embedded Mode",
-    "relative_path": "_docs/install/installing-drill-in-embedded-mode/011-running-drill-on-docker.md"
-},
-{
     "url": "/docs/installing-drill-on-linux-and-mac-os-x/",
     "title": "Installing Drill on Linux and Mac OS X",
     "parent": "Installing Drill in Embedded Mode",
@@ -1398,6 +1428,12 @@
     "relative_path": "_docs/query-data/querying-indexes/060-verifying-index-use.md"
 },
 {
+    "url": "/docs/apache-drill-1-18-0-release-notes/",
+    "title": "Apache Drill 1.18.0 Release Notes",
+    "parent": "Release Notes",
+    "relative_path": "_docs/rn/003-1.18.0-rn.md"
+},
+{
     "url": "/docs/apache-drill-1-17-0-release-notes/",
     "title": "Apache Drill 1.17.0 Release Notes",
     "parent": "Release Notes",
@@ -1962,6 +1998,24 @@
     "relative_path": "_docs/sql-reference/sql-functions/063-cryptography-functions.md"
 },
 {
+    "url": "/docs/sql-dialect-compatibility-functions/",
+    "title": "SQL dialect compatibility functions",
+    "parent": "SQL Functions",
+    "relative_path": "_docs/sql-reference/sql-functions/070-sql-dialect-compat-functions.md"
+},
+{
+    "url": "/docs/gis-functions/",
+    "title": "GIS functions",
+    "parent": "SQL Functions",
+    "relative_path": "_docs/sql-reference/sql-functions/080-gis-functions.md"
+},
+{
+    "url": "/docs/time-series-analysis-functions/",
+    "title": "Time Series Analysis Functions",
+    "parent": "SQL Functions",
+    "relative_path": "_docs/sql-reference/sql-functions/090-time-series-analysis-functions.md"
+},
+{
     "url": "/docs/sql-window-functions-introduction/",
     "title": "SQL Window Functions Introduction",
     "parent": "SQL Window Functions",
diff --git a/docs/011-running-drill-on-docker.md b/docs/011-running-drill-on-docker.md
new file mode 100644
index 0000000..28ad327
--- /dev/null
+++ b/docs/011-running-drill-on-docker.md
@@ -0,0 +1,91 @@
+--
+title: "Running Drill on Docker"
+slug: "Running Drill on Docker"
+parent: "Installing Drill in Embedded Mode"
+---  
+
+Starting in Drill 1.14, you can run Drill in a [Docker container](https://www.docker.com/what-container#/package_software). Running Drill in a container is the simplest way to start using Drill; all you need is the Docker client installed on your machine. You simply run a Docker command, and your Docker client downloads the Drill Docker image from the [apache/drill](https://hub.docker.com/r/apache/drill) repository on [Docker Hub](https://docs.docker.com/docker-hub/) and brings up a cont [...]
+
+Currently, you can only run Drill in embedded mode in a Docker container. Embedded mode is when a single instance of Drill runs on a node or in a container. You do not have to perform any configuration tasks when Drill runs in embedded mode.  
+
+## Prerequisite  
+
+You must have the Docker client (version 18 or later) [installed on your machine](https://docs.docker.com/install/).  
+
+
+## Running Drill in a Docker Container  
+
+You can start and run a Docker container in detached mode or foreground mode. [Detached mode]({{site.baseurl}}/docs/running-drill-on-docker/#running-the-drill-docker-container-in-detached-mode) runs the container in the background. Foreground is the default mode. [Foreground mode]({{site.baseurl}}/docs/running-drill-on-docker/#running-the-drill-docker-container-in-foreground-mode) runs the Drill process in the container and attaches the console to Drill’s standard input, output, and stan [...]
+
+Whether you run the Docker container in detached or foreground mode, you start Drill in a container by issuing the docker `run` command with some options, as described in the following table: 
+
+ 
+|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Option                   | Description                                                                                                                                                                                                                                                                                                     |
+|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-i`                     | Keeps STDIN open. STDIN is standard input, an input stream where data is sent to and read by a program.                                                                                                                                                                                                         |
+| `-t`                     | Allocates a pseudo-tty (a shell).                                                                                                                                                                                                                                                                               |
+| `--name`                 | Identifies the container. If you do not use this   option to identify a name for the container, the daemon generates a container ID for you. When you use this option to identify a container name,   you can use the name to reference the container within a Docker network in   foreground or detached mode. |
+| `-p`                     | The TCP port for the Drill Web UI. If needed, you can   change this port using the `drill.exec.http.port` [start-up option]({{site.baseurl}}/docs/start-up-options/).                                                                                                                                           |
+| `apache/drill:<version>` | The Docker Hub repository and tag. In the following   example, `apache/drill` is   the repository and `1.17.0`   is the tag:     `apache/drill:1.17.0`.     The tag correlates with the version of Drill. When a new version of Drill   is available, you can use the new version as the tag.                   |
+| `bin/bash`               | Connects to the Drill container using a bash shell.                                                                                                                                                                                                                                                             |
+|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### Running the Drill Docker Container in Foreground Mode  
+
+Open a terminal window (Command Prompt or PowerShell, but not PowerShell ISE) and then issue the following command and options to connect to SQLLine (the Drill shell):   
+
+       docker run -i --name drill-1.18.0 -p 8047:8047 -t apache/drill:1.18.0 /bin/bash
+
+When you issue the docker run command, the Drill process starts in a container. SQLLine prints a message, and the prompt appears:  
+
+       Apache Drill 1.18.0
+       "json ain't no thang"
+       apache drill>
+
+At the prompt, you can enter the following simple query to verify that Drill is running:  
+
+       SELECT version FROM sys.version;  
+
+### Running the Drill Docker Container in Detached Mode  
+
+Open a terminal window (Command Prompt or PowerShell, but not PowerShell ISE) and then issue the following commands and options to connect to SQLLine (the Drill shell):  
+
+**Note:** When you run the Drill Docker container in detached mode, you connect to SQLLine (the Drill shell) using drill-localhost.  
+
+       $ docker run -i --name drill-1.18.0 -p 8047:8047 --detach -t apache/drill:1.18.0 /bin/bash
+       <displays container ID>
+
+       $ docker exec -it drill-1.18.0 bash
+       <connects to container>
+
+       $ /opt/drill/bin/drill-localhost  
+
+After you issue the commands, the Drill process starts in a container. SQLLine prints a message, and the prompt appears:  
+
+       Apache Drill 1.18.0
+       "json ain't no thang"
+       apache drill>
+
+At the prompt, you can enter the following simple query to verify that Drill is running:  
+
+       SELECT version FROM sys.version;  
+
+## Querying Data  
+
+By default, you can only query files that are accessible within the container. For example, you can query the sample data packaged with Drill, as shown:  
+
+       SELECT first_name, last_name FROM cp.`employee.json` LIMIT 1;
+       |------------|-----------|
+       | first_name | last_name |
+       |------------|-----------|
+       | Sheri      | Nowmer    |
+       |------------|-----------|
+       1 row selected (0.256 seconds)  
+
+To query files outside of the container, you can configure [Docker volumes](https://docs.docker.com/storage/volumes/#start-a-service-with-volumes).  
+
+## Drill Web UI  
+
+You can access the Drill web UI at `http://localhost:8047` when the Drill Docker container is running. On Windows, you may need to specify the IP address of your system instead of using "localhost".
+
diff --git a/docs/configure-drill/securing-drill/070-configuring-user-security.md b/docs/070-configuring-user-security.md
similarity index 96%
rename from docs/configure-drill/securing-drill/070-configuring-user-security.md
rename to docs/070-configuring-user-security.md
index 8558a7a..af7fb8d 100644
--- a/docs/configure-drill/securing-drill/070-configuring-user-security.md
+++ b/docs/070-configuring-user-security.md
@@ -1,6 +1,6 @@
 c---
 title: "Configuring User Security"
-date: 2018-04-04 00:23:28 UTC
+slug: "Configuring User Security"
 parent: "Securing Drill"
 ---
 ## Authentication
@@ -34,7 +34,7 @@ By default, the highest security level is negotiated during the SASL handshake f
 
 The following table shows Drill client version compatibility with secure Drill clusters enabled with encryption. Drill 1.10 clients and lower do not support encryption and will not be allowed to connect to a drillbit with encryption enabled. 
 
-![compatEncrypt]({{site.baseurl}}/docs/img/client-encrypt-compatibility.png)
+![compatEncrypt]({{site.baseurl}}/images/docs/client-encrypt-compatibility.png)
 
 See *Client Encryption* in [Configuring Kerberos Security]({{site.baseurl}}/docs/configuring-kerberos-authentication/#client-encryption) for the client connection string parameter, `sasl_encrypt` usage information.
 
diff --git a/docs/about-sql-function-examples/index.html b/docs/about-sql-function-examples/index.html
index 2f9cf06..8fc762f 100644
--- a/docs/about-sql-function-examples/index.html
+++ b/docs/about-sql-function-examples/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,13 +1369,22 @@
 
     </div>
 
-     Nov 2, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
-        <p>You need to use a FROM clause in Drill queries. You can use the VALUES clause in the FROM clause to define rows of data in a derived table. The derived table has statement level scope.</p>
+        <p>Historically it was necessary to use a FROM clause in Drill queries and many examples documented here still use a VALUES clause in the FROM clause to define rows of data in a derived table of statement level scope.  Since Drill 0.4.0, the FROM clause has been optional allowing you to test functions using briefer syntax, e.g.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT SQRT(2);
+
+|--------------------|
+| EXPR$0             |
+|--------------------|
+| 1.4142135623730951 |
+|--------------------|
+</code></pre></div></div>
 
     
       
@@ -1361,7 +1402,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/about-the-mapr-sandbox/index.html b/docs/about-the-mapr-sandbox/index.html
index 1c69fba..ccc67d4 100644
--- a/docs/about-the-mapr-sandbox/index.html
+++ b/docs/about-the-mapr-sandbox/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,9 +1369,9 @@
 
     </div>
 
-     Nov 2, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1348,13 +1380,14 @@ Sandbox with Drill is a fully functional single-node cluster that can
 be used to get an overview of Drill in a Hadoop environment. Business
 and technical analysts, product managers, and developers can use the sandbox
 environment to get a feel for the power and capabilities of Drill by
-performing various types of queries. </p>
+performing various types of queries.</p>
 
 <p>Hadoop is not a prerequisite for Drill and users can start ramping
 up with Drill by running SQL queries directly on the local file system. Refer
 to <a href="/docs/drill-in-10-minutes">Apache Drill in 10 minutes</a> for an introduction to using Drill in local
 (embedded) mode.</p>
 
+
     
       
         <div class="doc-nav">
@@ -1371,7 +1404,7 @@ to <a href="/docs/drill-in-10-minutes">Apache Drill in 10 minutes</a> for an int
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/adding-custom-functions-to-drill-introduction/index.html b/docs/adding-custom-functions-to-drill-introduction/index.html
index 710491d..9adfd75 100644
--- a/docs/adding-custom-functions-to-drill-introduction/index.html
+++ b/docs/adding-custom-functions-to-drill-introduction/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,17 +1369,17 @@
 
     </div>
 
-     Jul 18, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
         <p>Starting in Drill 1.9, there are two methods for adding custom functions to Drill. An administrator can manually add functions to Drill, or provide users access to a staging directory where they can upload JAR files and register their UDFs using the CREATE FUNCTION USING JAR command. The CREATE FUNCTION USING JAR command is part of the Dynamic UDF feature.</p>
 
 <ul>
-<li>For manual instructions, see <a href="/docs/manually-adding-custom-functions-to-drill/">Manually Adding Custom Functions to Drill</a>. </li>
-<li>For Dynamic UDF information and instructions, see <a href="/docs/dynamic-udfs/">Dynamic UDFs</a>. </li>
+  <li>For manual instructions, see <a href="/docs/manually-adding-custom-functions-to-drill/">Manually Adding Custom Functions to Drill</a>.</li>
+  <li>For Dynamic UDF information and instructions, see <a href="/docs/dynamic-udfs/">Dynamic UDFs</a>.</li>
 </ul>
 
     
@@ -1366,7 +1398,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/adding-custom-functions-to-drill/index.html b/docs/adding-custom-functions-to-drill/index.html
index e910edd..c483791 100644
--- a/docs/adding-custom-functions-to-drill/index.html
+++ b/docs/adding-custom-functions-to-drill/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,9 +1367,9 @@
 
     </div>
 
-     Nov 2, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1372,7 +1404,7 @@
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/aggregate-and-aggregate-statistical/index.html b/docs/aggregate-and-aggregate-statistical/index.html
index db7e1c4..4c5088a 100644
--- a/docs/aggregate-and-aggregate-statistical/index.html
+++ b/docs/aggregate-and-aggregate-statistical/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,424 +1369,550 @@
 
     </div>
 
-     Jan 15, 2019
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
         <h2 id="aggregate-functions">Aggregate Functions</h2>
 
-<p>The following tables list the aggregate and aggregate statistical functions that you can use in 
-Drill queries:  </p>
-
-<table><thead>
-<tr>
-<th><strong>Function</strong></th>
-<th><strong>Argument Type</strong></th>
-<th><strong>Return Type</strong></th>
-</tr>
-</thead><tbody>
-<tr>
-<td>ANY_VALUE(expression)</td>
-<td>Bit, Int, BigInt, Float4, Float8, Date, Timestamp, Time, VarChar, VarBinary, List, Map, Interval, IntervalDay, IntervalYear, VarDecimal</td>
-<td>same as argument type</td>
-</tr>
-<tr>
-<td></td>
-<td></td>
-<td></td>
-</tr>
-<tr>
-<td>AVG(expression)</td>
-<td>SMALLINT,   INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVAL</td>
-<td>DECIMAL for DECIMAL argument,   DOUBLE for all other arguments</td>
-</tr>
-<tr>
-<td>COUNT(*)</td>
-<td>-</td>
-<td>BIGINT</td>
-</tr>
-<tr>
-<td>COUNT([DISTINCT] expression)</td>
-<td>any</td>
-<td>BIGINT</td>
-</tr>
-<tr>
-<td>MAX(expression)</td>
-<td>BINARY,   DECIMAL, VARCHAR, DATE, TIME, or TIMESTAMP</td>
-<td>same   as argument type</td>
-</tr>
-<tr>
-<td>MIN(expression)</td>
-<td>BINARY,   DECIMAL, VARCHAR, DATE, TIME, or TIMESTAMP</td>
-<td>same   as argument type</td>
-</tr>
-<tr>
-<td>SUM(expression)</td>
-<td>SMALLINT,   INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVAL</td>
-<td>DECIMAL for DECIMAL   argument,     BIGINT for any integer-type argument (including BIGINT), DOUBLE for   floating-point arguments</td>
-</tr>
-</tbody></table>
+<p>The following table lists the aggregate functions that you can use in Drill queries.</p>
+
+<table>
+  <thead>
+    <tr>
+      <th><strong>Function</strong></th>
+      <th><strong>Argument Type</strong></th>
+      <th><strong>Return Type</strong></th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td>ANY_VALUE(expression)</td>
+      <td>BIT, INT, BIGINT, FLOAT4, FLOAT8, DATE, TIMESTAMP, TIME, VARCHAR, VARBINARY, LIST, MAP, INTERVAL, INTERVALDAY, INTERVALYEAR, VARDECIMAL</td>
+      <td>Same as argument type</td>
+    </tr>
+    <tr>
+      <td>AVG(expression)</td>
+      <td>SMALLINT,   INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVAL</td>
+      <td>DECIMAL for DECIMAL argument,   DOUBLE for all other arguments</td>
+    </tr>
+    <tr>
+      <td>BOOL_AND(expression), BOOL_OR(expression)</td>
+      <td>BIT</td>
+      <td>BIT</td>
+    </tr>
+    <tr>
+      <td>COUNT(*)</td>
+      <td>-</td>
+      <td>BIGINT</td>
+    </tr>
+    <tr>
+      <td>COUNT([DISTINCT] expression)</td>
+      <td>any</td>
+      <td>BIGINT</td>
+    </tr>
+    <tr>
+      <td>MAX(expression), MIN(expression)</td>
+      <td>BINARY, DECIMAL, VARCHAR, DATE, TIME, or TIMESTAMP</td>
+      <td>Same   as argument type</td>
+    </tr>
+    <tr>
+      <td>SUM(expression)</td>
+      <td>SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVAL</td>
+      <td>DECIMAL for DECIMAL   argument,     BIGINT for any integer-type argument (including BIGINT), DOUBLE for   floating-point arguments</td>
+    </tr>
+  </tbody>
+</table>
 
 <ul>
-<li>Drill 1.14 and later supports the ANY_VALUE function. </li>
-<li>Starting in Drill 1.14, the DECIMAL data type is enabled by default.<br></li>
-<li>AVG, COUNT, MIN, MAX, and SUM accept ALL and DISTINCT keywords. The default is ALL.<br></li>
-<li>The aggregate function examples use the <code>cp</code> storage plugin to access the <a href="/docs/querying-json-files/"><code>employee.json</code></a> file installed with Drill. By default, JSON reads numbers as double-precision floating point numbers. These examples assume that you are using the default option <a href="/docs/json-data-model/#handling-type-differences">all_text_mode</a> set to false.<br></li>
+  <li>Drill 1.14 and later supports the ANY_VALUE function.</li>
+  <li>Starting in Drill 1.14, the DECIMAL data type is enabled by default.</li>
+  <li>AVG, COUNT, MIN, MAX, and SUM accept ALL and DISTINCT keywords. The default is ALL.</li>
+  <li>The aggregate function examples use the <code class="language-plaintext highlighter-rouge">cp</code> storage plugin to access the <a href="/docs/querying-json-files/"><code class="language-plaintext highlighter-rouge">employee.json</code></a> file installed with Drill. By default, JSON reads numbers as double-precision floating point numbers. These examples assume that you are using the default option <a href="/docs/json-data-model/#handling-type-differences">all_text_mode</a> set  [...]
 </ul>
 
 <h2 id="any_value">ANY_VALUE</h2>
-
-<p>Supported in Drill 1.14 and later. Returns one of the values of value across all input values. This function is NOT specified in the SQL standard.  </p>
+<p>Supported in Drill 1.14 and later. Returns one of the values of value across all input values. This function is NOT specified in the SQL standard.</p>
 
 <h3 id="any_value-syntax">ANY_VALUE Syntax</h3>
-
-<p>ANY_VALUE( [ ALL | DISTINCT ] value)  </p>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANY_VALUE([ ALL | DISTINCT ] value)  
+</code></pre></div></div>
 
 <h3 id="any_value-examples">ANY_VALUE Examples</h3>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT ANY_VALUE(employee_id) AS anyemp FROM cp.`employee.json`;
-+---------+
-| anyemp  |
-+---------+
-| 1156    |
-+---------+  
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT ANY_VALUE(employee_id) AS anyemp FROM cp.`employee.json`;
+|--------|
+| anyemp |
+|--------|
+| 1156   |
+|--------|
 
 SELECT ANY_VALUE(ALL employee_id) AS anyemp FROM cp.`employee.json`;
-+---------+
-| anyemp  |
-+---------+
-| 1156    |
-+---------+
+|--------|
+| anyemp |
+|--------|
+| 1156   |
+|--------|
 
 SELECT ANY_VALUE(DISTINCT employee_id) AS anyemp FROM cp.`employee.json`;
-+---------+
-| anyemp  |
-+---------+
-| 1156    |
-+---------+  
+|--------|
+| anyemp |
+|--------|
+| 1156   |
+|--------|
 
 SELECT ANY_VALUE(employee_id) as anyemp, salary as empsal FROM cp.`employee.json` GROUP BY salary;
-+---------+----------+
-| anyemp  |  empsal  |
-+---------+----------+
-| 1155    | 20.0     |
-| 197     | 3700.0   |
-| 1115    | 4200.0   |
-| 589     | 4300.0   |
-| 403     | 4400.0   |
-| 204     | 4500.0   |
-| 201     | 4550.0   |
-| 206     | 4600.0   |
-| 264     | 4650.0   |
-| 267     | 4700.0   |
-| 632     | 4800.0   |
-| 42      | 5000.0   |
-| 590     | 5200.0   |
-| 733     | 5900.0   |
-| 1144    | 6100.0   |
-| 625     | 6200.0   |
-| 1141    | 6400.0   |
-| 588     | 6500.0   |
-| 46      | 6600.0   |
-| 1075    | 6700.0   |
-| 1079    | 6800.0   |
-| 1028    | 6900.0   |
-| 900     | 7000.0   |
-| 63      | 7100.0   |
-| 60      | 7200.0   |
-| 69      | 7500.0   |
-| 764     | 7900.0   |
-| 957     | 8000.0   |
-| 566     | 8100.0   |
-| 171     | 8200.0   |
-| 489     | 8500.0   |
-| 226     | 8900.0   |
-| 490     | 9000.0   |
-| 8       | 10000.0  |
-| 35      | 11000.0  |
-| 53      | 12000.0  |
-| 484     | 13000.0  |
-| 31      | 14000.0  |
-| 13      | 15000.0  |
-| 27      | 16000.0  |
-| 33      | 17000.0  |
-| 6       | 25000.0  |
-| 20      | 30000.0  |
-| 21      | 35000.0  |
-| 4       | 40000.0  |
-| 36      | 45000.0  |
-| 10      | 50000.0  |
-| 1       | 80000.0  |
-+---------+----------+  
+|--------|---------|
+| anyemp | empsal  |
+|--------|---------|
+| 1155   | 20.0    |
+| 197    | 3700.0  |
+| 1115   | 4200.0  |
+| 589    | 4300.0  |
+| 403    | 4400.0  |
+| 204    | 4500.0  |
+...
 
 SELECT ANY_VALUE(employee_id) as anyemp FROM cp.`employee.json` GROUP BY salary ORDER BY anyemp;
-+-----------+
-| anyemp    |
-+-----------+
-| 1         |
-| 4         |
-| 6         |
-| 8         |
-| 10        |
-| 13        |
-| 20        |
-| 21        |
-| 27        |
-| 31        |
-| 33        |
-| 35        |
-| 36        |
-| 42        |
-| 46        |
-| 53        |
-| 60        |
-| 63        |
-| 69        |
-| 171       |
-| 197       |
-| 201       |
-| 204       |
-| 206       |
-| 226       |
-| 264       |
-| 267       |
-| 403       |
-| 484       |
-| 489       |
-| 490       |
-| 566       |
-| 588       |
-| 589       |
-| 590       |
-| 625       |
-| 632       |
-| 733       |
-| 764       |
-| 900       |
-| 957       |
-| 1028      |
-| 1075      |
-| 1079      |
-| 1115      |
-| 1141      |
-| 1144      |
-| 1155      |
-+-----------+  
-</code></pre></div>
+|--------|
+| anyemp |
+|--------|
+| 1      |
+| 4      |
+| 6      |
+| 8      |
+| 10     |
+| 13     |
+...
+</code></pre></div></div>
+
 <h2 id="avg">AVG</h2>
 
-<p>Averages a column of all records in a data source. Averages a column of one or more groups of records. Which records to include in the calculation can be based on a condition.</p>
+<p>Returns the average of a numerical expression.</p>
 
 <h3 id="avg-syntax">AVG Syntax</h3>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT AVG([ALL | DISTINCT] aggregate_expression)
-FROM tables
-WHERE conditions;
-
-SELECT expression1, expression2, ... expression_n,
-       AVG([ALL | DISTINCT] aggregate_expression)
-FROM tables
-WHERE conditions
-GROUP BY expression1, expression2, ... expression_n;
-</code></pre></div>
-<p>Expressions listed within the AVG function and must be included in the GROUP BY clause. </p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>AVG([ALL | DISTINCT] expression)
+</code></pre></div></div>
 
 <h3 id="avg-examples">AVG Examples</h3>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ALTER SESSION SET `store.json.all_text_mode` = false;
-+-------+------------------------------------+
-|  ok   |              summary               |
-+-------+------------------------------------+
-| true  | store.json.all_text_mode updated.  |
-+-------+------------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ALTER SESSION SET `store.json.all_text_mode` = false;
+|------|-----------------------------------|
+| ok   | summary                           |
+|------|-----------------------------------|
+| true | store.json.all_text_mode updated. |
+|------|-----------------------------------|
 1 row selected (0.073 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <p>Take a look at the salaries of employees having IDs 1139, 1140, and 1141. These are the salaries that subsequent examples will average and sum.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT * FROM cp.`employee.json` WHERE employee_id IN (1139, 1140, 1141);
-+--------------+------------------+-------------+------------+--------------+--------------------------+-----------+----------------+-------------+------------------------+-------------+----------------+------------------+-----------------+---------+-----------------------+
-| employee_id  |    full_name     | first_name  | last_name  | position_id  |      position_title      | store_id  | department_id  | birth_date  |       hire_date        |   salary    | supervisor_id  | education_level  | marital_status  | gender  |    management_role    |
-+--------------+------------------+-------------+------------+--------------+--------------------------+-----------+----------------+-------------+------------------------+-------------+----------------+------------------+-----------------+---------+-----------------------+
-| 1139         | Jeanette Belsey  | Jeanette    | Belsey     | 12           | Store Assistant Manager  | 18        | 11             | 1972-05-12  | 1998-01-01 00:00:00.0  | 10000.0000  | 17             | Graduate Degree  | S               | M       | Store Management      |
-| 1140         | Mona Jaramillo   | Mona        | Jaramillo  | 13           | Store Shift Supervisor   | 18        | 11             | 1961-09-24  | 1998-01-01 00:00:00.0  | 8900.0000   | 1139           | Partial College  | S               | M       | Store Management      |
-| 1141         | James Compagno   | James       | Compagno   | 15           | Store Permanent Checker  | 18        | 15             | 1914-02-02  | 1998-01-01 00:00:00.0  | 6400.0000   | 1139           | Graduate Degree  | S               | M       | Store Full Time Staf  |
-+--------------+------------------+-------------+------------+--------------+--------------------------+-----------+----------------+-------------+------------------------+-------------+----------------+------------------+-----------------+---------+-----------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT * FROM cp.`employee.json` WHERE employee_id IN (1139, 1140, 1141);
+|-------------|-----------------|------------|-----------|-------------|-------------------------|----------|---------------|------------|-----------------------|------------|---------------|-----------------|----------------|--------|----------------------|
+| employee_id | full_name       | first_name | last_name | position_id | position_title          | store_id | department_id | birth_date | hire_date             | salary     | supervisor_id | education_level | marital_status | gender | management_role      |
+|-------------|-----------------|------------|-----------|-------------|-------------------------|----------|---------------|------------|-----------------------|------------|---------------|-----------------|----------------|--------|----------------------|
+| 1139        | Jeanette Belsey | Jeanette   | Belsey    | 12          | Store Assistant Manager | 18       | 11            | 1972-05-12 | 1998-01-01 00:00:00.0 | 10000.0000 | 17            | Graduate Degree | S              | M      | Store Management     |
+| 1140        | Mona Jaramillo  | Mona       | Jaramillo | 13          | Store Shift Supervisor  | 18       | 11            | 1961-09-24 | 1998-01-01 00:00:00.0 | 8900.0000  | 1139          | Partial College | S              | M      | Store Management     |
+| 1141        | James Compagno  | James      | Compagno  | 15          | Store Permanent Checker | 18       | 15            | 1914-02-02 | 1998-01-01 00:00:00.0 | 6400.0000  | 1139          | Graduate Degree | S              | M      | Store Full Time Staf |
+|-------------|-----------------|------------|-----------|-------------|-------------------------|----------|---------------|------------|-----------------------|------------|---------------|-----------------|----------------|--------|----------------------|
 3 rows selected (0.284 seconds)
-</code></pre></div><div class="highlight"><pre><code class="language-text" data-lang="text">SELECT AVG(salary) FROM cp.`employee.json` WHERE employee_id IN (1139, 1140, 1141);
-+--------------------+
-|       EXPR$0       |
-+--------------------+
-| 8433.333333333334  |
-+--------------------+
+
+SELECT AVG(salary) FROM cp.`employee.json` WHERE employee_id IN (1139, 1140, 1141);
+|-------------------|
+| EXPR$0            |
+|-------------------|
+| 8433.333333333334 |
+|-------------------|
 1 row selected (0.208 seconds)
 
 SELECT AVG(ALL salary) FROM cp.`employee.json` WHERE employee_id IN (1139, 1140, 1141);
-+--------------------+
-|       EXPR$0       |
-+--------------------+
-| 8433.333333333334  |
-+--------------------+
+|-------------------|
+| EXPR$0            |
+|-------------------|
+| 8433.333333333334 |
+|-------------------|
 1 row selected (0.17 seconds)
 
 SELECT AVG(DISTINCT salary) FROM cp.`employee.json`;
-+---------------------+
-|       EXPR$0        |
-+---------------------+
-| 12773.333333333334  |
-+---------------------+
+|--------------------|
+| EXPR$0             |
+|--------------------|
+| 12773.333333333334 |
+|--------------------|
 1 row selected (0.384 seconds)
-</code></pre></div><div class="highlight"><pre><code class="language-text" data-lang="text">SELECT education_level, AVG(salary) FROM cp.`employee.json` GROUP BY education_level;
-+----------------------+---------------------+
-|   education_level    |       EXPR$1        |
-+----------------------+---------------------+
-| Graduate Degree      | 4392.823529411765   |
-| Bachelors Degree     | 4492.404181184669   |
-| Partial College      | 4047.1180555555557  |
-| High School Degree   | 3516.1565836298932  |
-| Partial High School  | 3511.0852713178297  |
-+----------------------+---------------------+
+
+SELECT education_level, AVG(salary) FROM cp.`employee.json` GROUP BY education_level;
+|---------------------|--------------------|
+| education_level     | EXPR$1             |
+|---------------------|--------------------|
+| Graduate Degree     | 4392.823529411765  |
+| Bachelors Degree    | 4492.404181184669  |
+| Partial College     | 4047.1180555555557 |
+| High School Degree  | 3516.1565836298932 |
+| Partial High School | 3511.0852713178297 |
+|---------------------|--------------------|
 5 rows selected (0.495 seconds)
-</code></pre></div>
-<h2 id="count">COUNT</h2>
+</code></pre></div></div>
+
+<h2 id="bool_and-and-bool_or">BOOL_AND and BOOL_OR</h2>
+<p>Returns the result of a logical AND (resp. OR) over the specified expression.</p>
+
+<h3 id="bool_and-and-bool_or-syntax">BOOL_AND and BOOL_OR Syntax</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>BOOL_AND(expression)
+BOOL_OR(expression)
+</code></pre></div></div>
+
+<h3 id="bool_and-and-bool_or-examples">BOOL_AND and BOOL_OR Examples</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT BOOL_AND(last_name = 'Spence') FROM cp.`employee.json`;
+|--------|
+| EXPR$0 |
+|--------|
+| false  |
+|--------|
 
+SELECT BOOL_OR(last_name = 'Spence') FROM cp.`employee.json`;
+|--------|
+| EXPR$0 |
+|--------|
+| true   |
+|--------|
+</code></pre></div></div>
+
+<h3 id="bool_and-and-bool_or-usage-notes">BOOL_AND and BOOL_OR Usage Notes</h3>
+
+<ol>
+  <li>EVERY is nearly an alias for BOOL_AND but returns a TINYINT rather than a BIT.</li>
+</ol>
+
+<h2 id="count">COUNT</h2>
 <p>Returns the number of rows that match the given criteria.</p>
 
 <h3 id="count-syntax">COUNT Syntax</h3>
 
-<p><code>SELECT COUNT([DISTINCT | ALL] column) FROM . . .</code><br>
-<code>SELECT COUNT(*) FROM . . .</code>  </p>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT COUNT([ALL | DISTINCT] expression) FROM . . . 
+SELECT COUNT(*) FROM . . .
+</code></pre></div></div>
 
 <ul>
-<li>column<br>
-Returns the number of values of the specified column.<br></li>
-<li>DISTINCT column<br>
-Returns the number of distinct values in the column.<br></li>
-<li>ALL column<br>
-Returns the number of values of the specified column.<br></li>
-<li>* (asterisk)
+  <li>expression<br />
+Returns the number of values of the specified expression.</li>
+  <li>DISTINCT expression<br />
+Returns the number of distinct values in the expression.</li>
+  <li>ALL expression<br />
+Returns the number of values of the specified expression.</li>
+  <li>* (asterisk)<br />
Returns the number of records in the table.</li>
 </ul>
 
 <h3 id="count-examples">COUNT Examples</h3>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT COUNT(DISTINCT salary) FROM cp.`employee.json`;
-+---------+
-| EXPR$0  |
-+---------+
-| 48      |
-+---------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT COUNT(DISTINCT salary) FROM cp.`employee.json`;
+|--------|
+| EXPR$0 |
+|--------|
+| 48     |
+|--------|
 1 row selected (0.159 seconds)
 
 SELECT COUNT(ALL salary) FROM cp.`employee.json`;
-+---------+
-| EXPR$0  |
-+---------+
-| 1155    |
-+---------+
+|--------|
+| EXPR$0 |
+|--------|
+| 1155   |
+|--------|
 1 row selected (0.106 seconds)
 
 SELECT COUNT(salary) FROM cp.`employee.json`;
-+---------+
-| EXPR$0  |
-+---------+
-| 1155    |
-+---------+
+|--------|
+| EXPR$0 |
+|--------|
+| 1155   |
+|--------|
 1 row selected (0.102 seconds)
 
 SELECT COUNT(*) FROM cp.`employee.json`;
-+---------+
-| EXPR$0  |
-+---------+
-| 1155    |
-+---------+
+|--------|
+| EXPR$0 |
+|--------|
+| 1155   |
+|--------|
 1 row selected (0.174 seconds)
-</code></pre></div>
-<h2 id="min-and-max-functions">MIN and MAX Functions</h2>
+</code></pre></div></div>
 
-<p>These functions return the smallest and largest values of the selected columns, respectively.</p>
+<h2 id="min-and-max">MIN and MAX</h2>
+<p>These functions return the smallest and largest values of the selected expressions, respectively.</p>
 
 <h3 id="min-and-max-syntax">MIN and MAX Syntax</h3>
 
-<p>MIN(column)<br>
-MAX(column)</p>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>MIN(expression)  
+MAX(expression)
+</code></pre></div></div>
 
 <h3 id="min-and-max-examples">MIN and MAX Examples</h3>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT MIN(salary) FROM cp.`employee.json`;
-+---------+
-| EXPR$0  |
-+---------+
-| 20.0    |
-+---------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT MIN(salary) FROM cp.`employee.json`;
+|--------|
+| EXPR$0 |
+|--------|
+| 20.0   |
+|--------|
 1 row selected (0.138 seconds)
 
 SELECT MAX(salary) FROM cp.`employee.json`;
-+----------+
-|  EXPR$0  |
-+----------+
-| 80000.0  |
-+----------+
+|---------|
+| EXPR$0  |
+|---------|
+| 80000.0 |
+|---------|
 1 row selected (0.139 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <p>Use a correlated subquery to find the names and salaries of the lowest paid employees:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT full_name, SALARY FROM cp.`employee.json` WHERE salary = (SELECT MIN(salary) FROM cp.`employee.json`);
-+------------------------+---------+
-|       full_name        | SALARY  |
-+------------------------+---------+
-| Leopoldo Renfro        | 20.0    |
-| Donna Brockett         | 20.0    |
-| Laurie Anderson        | 20.0    |
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT full_name, SALARY FROM cp.`employee.json` WHERE salary = (SELECT MIN(salary) FROM cp.`employee.json`);
+|-----------------|--------|
+| full_name       | SALARY |
+|-----------------|--------|
+| Leopoldo Renfro | 20.0   |
+| Donna Brockett  | 20.0   |
+| Laurie Anderson | 20.0   |
 . . .
-</code></pre></div>
-<h2 id="sum-function">SUM Function</h2>
+</code></pre></div></div>
 
-<p>Returns the total of a numeric column.</p>
+<h2 id="sum">SUM</h2>
+<p>Returns the sum of a numeric expression.</p>
 
 <h3 id="sum-syntax">SUM syntax</h3>
 
-<p><code>SUM(column)</code></p>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SUM([DISTINCT | ALL] expression)
+</code></pre></div></div>
 
 <h3 id="examples">Examples</h3>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT SUM(ALL salary) FROM cp.`employee.json`;
-+------------+
-|   EXPR$0   |
-+------------+
-| 4642640.0  |
-+------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT SUM(ALL salary) FROM cp.`employee.json`;
+|-----------|
+| EXPR$0    |
+|-----------|
+| 4642640.0 |
+|-----------|
 1 row selected (0.123 seconds)
 
 SELECT SUM(DISTINCT salary) FROM cp.`employee.json`;
-+-----------+
-|  EXPR$0   |
-+-----------+
-| 613120.0  |
-+-----------+
+|----------|
+| EXPR$0   |
+|----------|
+| 613120.0 |
+|----------|
 1 row selected (0.309 seconds)
 
 SELECT SUM(salary) FROM cp.`employee.json` WHERE employee_id IN (1139, 1140, 1141);
-+----------+
-|  EXPR$0  |
-+----------+
-| 25300.0  |
-+----------+
+|---------|
+| EXPR$0  |
+|---------|
+| 25300.0 |
+|---------|
 1 row selected (1.995 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <h2 id="aggregate-statistical-functions">Aggregate Statistical Functions</h2>
 
-<p>Drill provides following aggregate statistics functions:</p>
+<p>The following table lists the aggregate statistical functions that you can use in Drill queries.</p>
+
+<table>
+  <thead>
+    <tr>
+      <th><strong>Function</strong></th>
+      <th><strong>Argument Type</strong></th>
+      <th><strong>Return Type</strong></th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td>APPROX_COUNT_DUPS(expression)</td>
+      <td>any</td>
+      <td>BIGINT</td>
+    </tr>
+    <tr>
+      <td>STDDEV(expression)</td>
+      <td>SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL</td>
+      <td>DECIMAL for DECIMAL arguments, otherwise DOUBLE</td>
+    </tr>
+    <tr>
+      <td>STDDEV_POP(expression)</td>
+      <td>SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL</td>
+      <td>DECIMAL for DECIMAL arguments, otherwise DOUBLE</td>
+    </tr>
+    <tr>
+      <td>VARIANCE(expression)</td>
+      <td>SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL</td>
+      <td>DECIMAL for DECIMAL arguments, otherwise DOUBLE</td>
+    </tr>
+    <tr>
+      <td>VAR_POP(expression)</td>
+      <td>SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL</td>
+      <td>DECIMAL for DECIMAL arguments, otherwise DOUBLE</td>
+    </tr>
+  </tbody>
+</table>
+
+<h2 id="approx_count_dups">APPROX_COUNT_DUPS</h2>
+
+<p>Returns an approximate count of the values that are duplicates (not unique).</p>
+
+<h3 id="approx_count_dups-syntax">APPROX_COUNT_DUPS Syntax</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>APPROX_COUNT_DUPS( expression )
+</code></pre></div></div>
+
+<h3 id="approx_count_dups-examples">APPROX_COUNT_DUPS Examples</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>select
+  COUNT(*),
+  APPROX_COUNT_DUPS(e1.employee_id),
+  APPROX_COUNT_DUPS(e1.gender)
+FROM cp.`employee.json` e1
+
+|--------|--------|--------|
+| EXPR$0 | EXPR$1 | EXPR$2 |
+|--------|--------|--------|
+| 1155   | 0      | 1153   |
+|--------|--------|--------|
+</code></pre></div></div>
+
+<p>Use COUNT - APPROX_COUNT_DUPS to approximate a distinct count.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>select
+  COUNT(*),
+  COUNT(salary) - APPROX_COUNT_DUPS(salary),
+  COUNT(distinct salary)
+from cp.`employee.json`;
+
+|--------|--------|--------|
+| EXPR$0 | EXPR$1 | EXPR$2 |
+|--------|--------|--------|
+| 1155   | 48     | 48     |
+|--------|--------|--------|
+</code></pre></div></div>
+
+<h3 id="approx_count_dups-usage-notes">APPROX_COUNT_DUPS Usage Notes</h3>
+
+<p>The underlying Bloom filter is a probabilistic data structure that may return a false positive when an element is tested for duplication.  Consequently, the approximate count returned <em>overestimates</em> the true duplicate count.  In return for this inaccuracy, Bloom filters are highly space- and time-efficient at large scales with the specifics determined by the parameters of the filter (see below).</p>
+
+<h3 id="configuration-options">Configuration options</h3>
+
+<div class="admonition note">
+  <p class="first admonition-title">Note</p>
+  <p class="last">
+The APPROX_COUNT_DUPS function is used internally by Drill when it computes table statistics.  As a result, setting configuration options that affect it in the global configuration scope will affect the computation of table statistics accordingly.
+  </p>
+</div>
 
 <ul>
-<li>stddev(expression)<br>
-An alias for stddev_samp</li>
-<li>stddev_pop(expression)
-Population standard deviate of input values</li>
-<li>stddev_samp(expression)
-Sample standard deviate of input values</li>
-<li>variance(expression)
-An alias for var_samp</li>
-<li>var_pop(expression)
-Population variance of input values (the population standard deviated squared)</li>
-<li>var_samp(expression)
-Sample variance of input values (sample standard deviation squared)</li>
+  <li>exec.statistics.ndv_extrapolation_bf_elements</li>
+  <li>exec.statistics.ndv_extrapolation_bf_fpprobability</li>
 </ul>
 
-<p>These functions take a SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, or DECIMAL expression as the argument. The functions return DECIMAL for DECIMAL arguments and DOUBLE for all other arguments.</p>
+<h2 id="stddev">STDDEV</h2>
+
+<p>Returns the sample standard deviation.</p>
+
+<h3 id="stddev-syntax">STDDEV Syntax</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>STDDEV(expression)
+</code></pre></div></div>
+
+<h3 id="stddev-examples">STDDEV Examples</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT STDDEV(salary) from cp.`employee.json`;
+
+|-------------------|
+| EXPR$0            |
+|-------------------|
+| 5371.847873988941 |
+|-------------------|
+</code></pre></div></div>
+
+<h3 id="stddev-usage-notes">STDDEV Usage Notes</h3>
+
+<ol>
+  <li>Aliases: STDDEV_SAMP</li>
+</ol>
+
+<h2 id="stddev_pop">STDDEV_POP</h2>
+
+<p>Returns the population standard deviation, computed without Bessel’s correction (i.e. dividing by n rather than n - 1).</p>
+
+<h3 id="stddev_pop-syntax">STDDEV_POP Syntax</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>STDDEV_POP(expression)
+</code></pre></div></div>
+
+<h3 id="stddev_pop-examples">STDDEV_POP Examples</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT STDDEV_POP(salary) from cp.`employee.json`;
+
+|-------------------|
+| EXPR$0            |
+|-------------------|
+| 5369.521895151171 |
+|-------------------|
+</code></pre></div></div>
+
+<h2 id="variance">VARIANCE</h2>
+
+<p>Returns the sample variance.</p>
+
+<h3 id="variance-syntax">VARIANCE Syntax</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>VARIANCE(expression)
+</code></pre></div></div>
+
+<h3 id="variance-examples">VARIANCE Examples</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT VARIANCE(salary) from cp.`employee.json`;
+
+|--------------------|
+| EXPR$0             |
+|--------------------|
+| 28856749.581279505 |
+|--------------------|
+</code></pre></div></div>
+
+<h3 id="variance-usage-notes">VARIANCE Usage Notes</h3>
+
+<ol>
+  <li>Aliases: VAR_SAMP</li>
+</ol>
+
+<h2 id="var_pop">VAR_POP</h2>
+
+<p>Returns the population variance, computed without Bessel’s correction (i.e. dividing by n rather than n - 1).</p>
+
+<h3 id="var_pop-syntax">VAR_POP Syntax</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>VAR_POP(expression)
+</code></pre></div></div>
+
+<h3 id="var_pop-examples">VAR_POP Examples</h3>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT VAR_POP(salary) from cp.`employee.json`;
+
+|--------------------|
+| EXPR$0             |
+|--------------------|
+| 28831765.382507823 |
+|--------------------|
+</code></pre></div></div>
+
 
     
       
@@ -1772,7 +1930,7 @@ Sample variance of input values (sample standard deviation squared)</li>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/aggregate-window-functions/index.html b/docs/aggregate-window-functions/index.html
index 02ac25e..26d7049 100644
--- a/docs/aggregate-window-functions/index.html
+++ b/docs/aggregate-window-functions/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,95 +1369,98 @@
 
     </div>
 
-     Jun 26, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
-        <p>Window functions operate on a set of rows and return a single value for each row from the underlying query. The OVER() clause differentiates window functions from other analytical and reporting functions. See <a href="/docs/sql-window-functions-introduction/">SQL Window Functions Introduction</a>. You can use certain aggregate functions as window functions in Drill. </p>
+        <p>Window functions operate on a set of rows and return a single value for each row from the underlying query. The OVER() clause differentiates window functions from other analytical and reporting functions. See <a href="/docs/sql-window-functions-introduction/">SQL Window Functions Introduction</a>. You can use certain aggregate functions as window functions in Drill.</p>
 
-<p>The following table lists the aggregate window functions with supported data types and descriptions:  </p>
+<p>The following table lists the aggregate window functions with supported data types and descriptions:</p>
 
-<table><thead>
-<tr>
-<th><strong>Window   Function</strong></th>
-<th><strong>Argument Type</strong></th>
-<th><strong>Return Type</strong></th>
-<th><strong>Description</strong></th>
-</tr>
-</thead><tbody>
-<tr>
-<td>AVG()</td>
-<td>SMALLINT,   INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVALYEAR or INTERVALDAY</td>
-<td>DECIMAL   for DECIMAL argument, DOUBLE for all other arguments</td>
-<td>The   AVG window function returns the average value for the input expression   values. The AVG function works with numeric values and ignores NULL values.</td>
-</tr>
-<tr>
-<td>COUNT()</td>
-<td>All   argument data types</td>
-<td>BIGINT</td>
-<td>The   COUNT() window function counts the number of input rows. COUNT(*) counts all   of the rows in the target table if they do or do not include nulls.   COUNT(expression) computes the number of rows with non-NULL values in a   specific column or expression.</td>
-</tr>
-<tr>
-<td>MAX()</td>
-<td>BINARY,   DECIMAL, VARCHAR, DATE, TIME, or TIMESTAMP</td>
-<td>Same   as argument type</td>
-<td>The   MAX() window function returns the maximum value of the expression across all   input values. The MAX function works with numeric values and ignores NULL   values.</td>
-</tr>
-<tr>
-<td>MIN()</td>
-<td>BINARY,   DECIMAL, VARCHAR, DATE, TIME, or TIMESTAMP</td>
-<td>Same   as argument type</td>
-<td>The   MIN () window function returns the minimum value of the expression across all   input values. The MIN function works with numeric values and ignores NULL   values.</td>
-</tr>
-<tr>
-<td>SUM()</td>
-<td>SMALLINT,   INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVALDAY, or INTERVALYEAR</td>
-<td>DECIMAL for DECIMAL argument,     BIGINT for any integer-type argument (including BIGINT), DOUBLE for   floating-point arguments</td>
-<td>The   SUM () window function returns the sum of the expression across all input   values. The SUM function works with numeric values and ignores NULL values.</td>
-</tr>
-</tbody></table>
+<table>
+  <thead>
+    <tr>
+      <th><strong>Window   Function</strong></th>
+      <th><strong>Argument Type</strong></th>
+      <th><strong>Return Type</strong></th>
+      <th><strong>Description</strong></th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td>AVG()</td>
+      <td>SMALLINT,   INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVALYEAR or INTERVALDAY</td>
+      <td>DECIMAL   for DECIMAL argument, DOUBLE for all other arguments</td>
+      <td>The   AVG window function returns the average value for the input expression   values. The AVG function works with numeric values and ignores NULL values.</td>
+    </tr>
+    <tr>
+      <td>COUNT()</td>
+      <td>All   argument data types</td>
+      <td>BIGINT</td>
+      <td>The   COUNT() window function counts the number of input rows. COUNT(*) counts all   of the rows in the target table if they do or do not include nulls.   COUNT(expression) computes the number of rows with non-NULL values in a   specific column or expression.</td>
+    </tr>
+    <tr>
+      <td>MAX()</td>
+      <td>BINARY,   DECIMAL, VARCHAR, DATE, TIME, or TIMESTAMP</td>
+      <td>Same   as argument type</td>
+      <td>The   MAX() window function returns the maximum value of the expression across all   input values. The MAX function works with numeric values and ignores NULL   values.</td>
+    </tr>
+    <tr>
+      <td>MIN()</td>
+      <td>BINARY,   DECIMAL, VARCHAR, DATE, TIME, or TIMESTAMP</td>
+      <td>Same   as argument type</td>
+      <td>The   MIN () window function returns the minimum value of the expression across all   input values. The MIN function works with numeric values and ignores NULL   values.</td>
+    </tr>
+    <tr>
+      <td>SUM()</td>
+      <td>SMALLINT,   INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVALDAY, or INTERVALYEAR</td>
+      <td>DECIMAL for DECIMAL argument,     BIGINT for any integer-type argument (including BIGINT), DOUBLE for   floating-point arguments</td>
+      <td>The   SUM () window function returns the sum of the expression across all input   values. The SUM function works with numeric values and ignores NULL values.</td>
+    </tr>
+  </tbody>
+</table>
 
 <h2 id="syntax">Syntax</h2>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   window_function ( [ ALL ] expression ) 
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   window_function ( [ ALL ] expression ) 
    OVER ( [ PARTITION BY expr_list ] [ ORDER BY order_list frame_clause ] )
-</code></pre></div>
+</code></pre></div></div>
+
 <h2 id="arguments">Arguments</h2>
 
-<p><em>window_function</em><br>
-One of the following supported aggregate functions:<br>
-AVG(), COUNT(), MAX(), MIN(), SUM() </p>
+<p><em>window_function</em><br />
+One of the following supported aggregate functions:<br />
+AVG(), COUNT(), MAX(), MIN(), SUM()</p>
 
-<p><em>expression</em><br>
-The target column or expression that the function operates on.  </p>
+<p><em>expression</em><br />
+The target column or expression that the function operates on.</p>
 
-<p>ALL<br>
-When you include ALL, the function retains all duplicate values from the expression. ALL is the default. DISTINCT is not supported.  </p>
+<p>ALL<br />
+When you include ALL, the function retains all duplicate values from the expression. ALL is the default. DISTINCT is not supported.</p>
 
-<p>OVER<br>
-Specifies the window clauses for the aggregation functions. The OVER clause distinguishes window aggregation functions from normal set aggregation functions.  </p>
+<p>OVER<br />
+Specifies the window clauses for the aggregation functions. The OVER clause distinguishes window aggregation functions from normal set aggregation functions.</p>
 
-<p>PARTITION BY <em>expr_list</em><br>
-Defines the window for the window function in terms of one or more expressions.  </p>
+<p>PARTITION BY <em>expr_list</em><br />
+Defines the window for the window function in terms of one or more expressions.</p>
 
-<p>ORDER BY <em>order_list</em><br>
-Sorts the rows within each partition. If PARTITION BY is not specified, ORDER BY uses the entire table.  </p>
+<p>ORDER BY <em>order_list</em><br />
+Sorts the rows within each partition. If PARTITION BY is not specified, ORDER BY uses the entire table.</p>
 
-<p><em>frame_clause</em><br>
-If an ORDER BY clause is used for an aggregate function, an explicit frame clause is required. The frame clause refines the set of rows in a function&#39;s window, including or excluding sets of rows within the ordered result. The frame clause consists of the ROWS or RANGE keyword and associated specifiers.</p>
+<p><em>frame_clause</em><br />
+If an ORDER BY clause is used for an aggregate function, an explicit frame clause is required. The frame clause refines the set of rows in a function’s window, including or excluding sets of rows within the ordered result. The frame clause consists of the ROWS or RANGE keyword and associated specifiers.</p>
 
 <h2 id="examples">Examples</h2>
-
 <p>The following examples show queries that use each of the aggregate window functions in Drill. See <a href="/docs/sql-window-functions-examples/">SQL Window Functions Examples</a> for information about the data and setup for these examples.</p>
 
 <h3 id="avg">AVG()</h3>
+<p>The following query uses the AVG() window function with the PARTITION BY clause to calculate the average sales for each car dealer in Q1.</p>
 
-<p>The following query uses the AVG() window function with the PARTITION BY clause to calculate the average sales for each car dealer in Q1.  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   select dealer_id, sales, avg(sales) over (partition by dealer_id) as avgsales from q1_sales;
-   +------------+--------+-----------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   select dealer_id, sales, avg(sales) over (partition by dealer_id) as avgsales from q1_sales;
+   |------------|--------|-----------|
    | dealer_id  | sales  | avgsales  |
-   +------------+--------+-----------+
+   |------------|--------|-----------|
    | 1          | 19745  | 14357     |
    | 1          | 19745  | 14357     |
    | 1          | 8227   | 14357     |
@@ -1436,16 +1471,17 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
    | 3          | 15427  | 12368     |
    | 3          | 12369  | 12368     |
    | 3          | 9308   | 12368     |
-   +------------+--------+-----------+
+   |------------|--------|-----------|
    10 rows selected (0.455 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <h3 id="count">COUNT()</h3>
+<p>The following query uses the COUNT (*) window function to count the number of sales in Q1, ordered by dealer_id. The word count is enclosed in back ticks (``) because it is a reserved keyword in Drill.</p>
 
-<p>The following query uses the COUNT (*) window function to count the number of sales in Q1, ordered by dealer_id. The word count is enclosed in back ticks (``) because it is a reserved keyword in Drill.  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   select dealer_id, sales, count(*) over(order by dealer_id) as `count` from q1_sales;
-   +------------+--------+--------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   select dealer_id, sales, count(*) over(order by dealer_id) as `count` from q1_sales;
+   |------------|--------|--------|
    | dealer_id  | sales  | count  |
-   +------------+--------+--------+
+   |------------|--------|--------|
    | 1          | 19745  | 4      |
    | 1          | 19745  | 4      |
    | 1          | 8227   | 4      |
@@ -1456,14 +1492,16 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
    | 3          | 15427  | 10     |
    | 3          | 12369  | 10     |
    | 3          | 9308   | 10     |
-   +------------+--------+--------+
+   |------------|--------|--------|
    10 rows selected (0.215 seconds) 
-</code></pre></div>
-<p>The following query uses the COUNT() window function to count the total number of sales for each dealer in Q1. </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   select dealer_id, sales, count(sales) over(partition by dealer_id) as `count` from q1_sales;
-   +------------+--------+--------+
+</code></pre></div></div>
+
+<p>The following query uses the COUNT() window function to count the total number of sales for each dealer in Q1.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   select dealer_id, sales, count(sales) over(partition by dealer_id) as `count` from q1_sales;
+   |------------|--------|--------|
    | dealer_id  | sales  | count  |
-   +------------+--------+--------+
+   |------------|--------|--------|
    | 1          | 19745  | 4      |
    | 1          | 19745  | 4      |
    | 1          | 8227   | 4      |
@@ -1474,16 +1512,17 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
    | 3          | 15427  | 3      |
    | 3          | 12369  | 3      |
    | 3          | 9308   | 3      |
-   +------------+--------+--------+
+   |------------|--------|--------|
    10 rows selected (0.249 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <h3 id="max">MAX()</h3>
+<p>The following query uses the MAX() window function with the PARTITION BY clause to identify the employee with the maximum number of car sales in Q1 at each dealership. The word max is a reserved keyword in Drill and must be enclosed in back ticks (``).</p>
 
-<p>The following query uses the MAX() window function with the PARTITION BY clause to identify the employee with the maximum number of car sales in Q1 at each dealership. The word max is a reserved keyword in Drill and must be enclosed in back ticks (``).  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   select emp_name, dealer_id, sales, max(sales) over(partition by dealer_id) as `max` from q1_sales;
-   +-----------------+------------+--------+--------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   select emp_name, dealer_id, sales, max(sales) over(partition by dealer_id) as `max` from q1_sales;
+   |-----------------|------------|--------|--------|
    |    emp_name     | dealer_id  | sales  |  max   |
-   +-----------------+------------+--------+--------+
+   |-----------------|------------|--------|--------|
    | Ferris Brown    | 1          | 19745  | 19745  |
    | Noel Meyer      | 1          | 19745  | 19745  |
    | Raphael Hull    | 1          | 8227   | 19745  |
@@ -1494,16 +1533,18 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
    | Ursa George     | 3          | 15427  | 15427  |
    | Abel Kim        | 3          | 12369  | 15427  |
    | May Stout       | 3          | 9308   | 15427  |
-   +-----------------+------------+--------+--------+
+   |-----------------|------------|--------|--------|
    10 rows selected (0.402 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <h3 id="min">MIN()</h3>
 
-<p>The following query uses the MIN() window function with the PARTITION BY clause to identify the employee with the minimum number of car sales in Q1 at each dealership. The word min is a reserved keyword in Drill and must be enclosed in back ticks (``).  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   select emp_name, dealer_id, sales, min(sales) over(partition by dealer_id) as `min` from q1_sales;
-   +-----------------+------------+--------+-------+
+<p>The following query uses the MIN() window function with the PARTITION BY clause to identify the employee with the minimum number of car sales in Q1 at each dealership. The word min is a reserved keyword in Drill and must be enclosed in back ticks (``).</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   select emp_name, dealer_id, sales, min(sales) over(partition by dealer_id) as `min` from q1_sales;
+   |-----------------|------------|--------|-------|
    |    emp_name     | dealer_id  | sales  |  min  |
-   +-----------------+------------+--------+-------+
+   |-----------------|------------|--------|-------|
    | Ferris Brown    | 1          | 19745  | 8227  |
    | Noel Meyer      | 1          | 19745  | 8227  |
    | Raphael Hull    | 1          | 8227   | 8227  |
@@ -1514,16 +1555,17 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
    | Ursa George     | 3          | 15427  | 9308  |
    | Abel Kim        | 3          | 12369  | 9308  |
    | May Stout       | 3          | 9308   | 9308  |
-   +-----------------+------------+--------+-------+
+   |-----------------|------------|--------|-------|
    10 rows selected (0.194 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <h3 id="sum">SUM()</h3>
+<p>The following query uses the SUM() window function to total the amount of sales for each dealer in Q1. The word sum is a reserved keyword in Drill and must be enclosed in back ticks (``).</p>
 
-<p>The following query uses the SUM() window function to total the amount of sales for each dealer in Q1. The word sum is a reserved keyword in Drill and must be enclosed in back ticks (``).  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   select dealer_id, emp_name, sales, sum(sales) over(partition by dealer_id) as `sum` from q1_sales;
-   +------------+-----------------+--------+--------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   select dealer_id, emp_name, sales, sum(sales) over(partition by dealer_id) as `sum` from q1_sales;
+   |------------|-----------------|--------|--------|
    | dealer_id  |    emp_name     | sales  |  sum   |
-   +------------+-----------------+--------+--------+
+   |------------|-----------------|--------|--------|
    | 1          | Ferris Brown    | 19745  | 57427  |
    | 1          | Noel Meyer      | 19745  | 57427  |
    | 1          | Raphael Hull    | 8227   | 57427  |
@@ -1534,9 +1576,11 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
    | 3          | Ursa George     | 15427  | 37104  |
    | 3          | Abel Kim        | 12369  | 37104  |
    | 3          | May Stout       | 9308   | 37104  |
-   +------------+-----------------+--------+--------+
+   |------------|-----------------|--------|--------|
    10 rows selected (0.198 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
+
     
       
         <div class="doc-nav">
@@ -1553,7 +1597,7 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/alter-system/index.html b/docs/alter-system/index.html
index 4f9971d..551b34e 100644
--- a/docs/alter-system/index.html
+++ b/docs/alter-system/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,9 +1369,9 @@
 
     </div>
 
-     Nov 2, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1350,10 +1382,12 @@ settings.</p>
 <h2 id="syntax">Syntax</h2>
 
 <p>The ALTER SYSTEM command supports the following syntax:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ALTER SYSTEM SET `option_name` = value;  
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ALTER SYSTEM SET `option_name` = value;  
 ALTER SYSTEM RESET `option_name`;
 ALTER SYSTEM RESET ALL;
-</code></pre></div>
+</code></pre></div></div>
+
 <h2 id="parameters">Parameters</h2>
 
 <p><em>option_name</em></p>
@@ -1371,24 +1405,26 @@ or float. Use the appropriate value type for each option that you set.</p>
 execution options per cluster. Options set at the system level affect the
 entire system and persist between restarts.</p>
 
-<p>Using ALTER SYSTEM RESET changes the value of an option back to the default system setting. </p>
+<p>Using ALTER SYSTEM RESET changes the value of an option back to the default system setting.</p>
 
-<p>Using ALTER SYSTEM RESET ALL changes the value of every option back to the default system setting.  </p>
+<p>Using ALTER SYSTEM RESET ALL changes the value of every option back to the default system setting.</p>
 
 <p>You can run the following query to see a complete list of planning and
 execution options that are currently set at the system or session level:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT name, type FROM sys.options WHERE type in (&#39;SYSTEM&#39;,&#39;SESSION&#39;) order by name;
-+------------+------------------------------------------------+
-|   name                                         |    type    |
-+----------------------------------------------+--------------+
-| drill.exec.functions.cast_empty_string_to_null | SYSTEM     |
-| drill.exec.storage.file.partition.column.label | SYSTEM     |
-| exec.errors.verbose                            | SYSTEM     |
-| exec.java_compiler                             | SYSTEM     |
-| exec.java_compiler_debug                       | SYSTEM     |
-…
-+------------+------------------------------------------------+
-</code></pre></div>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT name, type FROM sys.options WHERE type in ('SYSTEM','SESSION') order by name;
+|------------------------------------------------|--------|
+| name                                           | type   |
+|------------------------------------------------|--------|
+| drill.exec.functions.cast_empty_string_to_null | SYSTEM |
+| drill.exec.storage.file.partition.column.label | SYSTEM |
+| exec.errors.verbose                            | SYSTEM |
+| exec.java_compiler                             | SYSTEM |
+| exec.java_compiler_debug                       | SYSTEM |
+| …                                              |        |
+|------------------------------------------------|--------|
+</code></pre></div></div>
+
 <div class="admonition note">
   <p class="first admonition-title">Note</p>
   <p class="last">This is a truncated version of the list.  </p>
@@ -1397,40 +1433,49 @@ execution options that are currently set at the system or session level:</p>
 <h2 id="example">Example</h2>
 
 <p>This example demonstrates how to use the ALTER SYSTEM command to set the
-<code>planner.add_producer_consumer</code> option to “true.” This option enables a
+<code class="language-plaintext highlighter-rouge">planner.add_producer_consumer</code> option to “true.” This option enables a
 secondary reading thread to prefetch data from disk.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; alter system set `planner.add_producer_consumer` = true;
-+------------+------------+
-|   ok  |  summary   |
-+------------+------------+
-| true      | planner.add_producer_consumer updated. |
-+------------+------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; alter system set `planner.add_producer_consumer` = true;
+|------|----------------------------------------|
+| ok   | summary                                |
+|------|----------------------------------------|
+| true | planner.add_producer_consumer updated. |
+|------|----------------------------------------|
 1 row selected (0.046 seconds)
-</code></pre></div>
+</code></pre></div></div>
+
 <p>You can issue a query to see all of the system level settings set to “true.”
 Note that the option type is case-sensitive.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT name, type, bool_val FROM sys.options WHERE type = &#39;SYSTEM&#39; and bool_val=true;
-+------------+------------+------------+
-|   name    |   type    |  bool_val  |
-+------------+------------+------------+
-| exec.java_compiler_debug | SYSTEM     | true      |
-| planner.enable_mergejoin | SYSTEM     | true      |
-| planner.enable_broadcast_join | SYSTEM    | true      |
-| planner.enable_hashagg | SYSTEM   | true      |
-| planner.add_producer_consumer | SYSTEM    | true      |
-| planner.enable_hash_single_key | SYSTEM   | true      |
-| planner.enable_multiphase_agg | SYSTEM    | true      |
-| planner.enable_streamagg | SYSTEM     | true      |
-| planner.enable_hashjoin | SYSTEM  | true      |
-+------------+------------+------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT name, type, bool_val FROM sys.options WHERE type = 'SYSTEM' and bool_val=true;
+|--------------------------------|--------|----------|
+| name                           | type   | bool_val |
+|--------------------------------|--------|----------|
+| exec.java_compiler_debug       | SYSTEM | true     |
+| planner.enable_mergejoin       | SYSTEM | true     |
+| planner.enable_broadcast_join  | SYSTEM | true     |
+| planner.enable_hashagg         | SYSTEM | true     |
+| planner.add_producer_consumer  | SYSTEM | true     |
+| planner.enable_hash_single_key | SYSTEM | true     |
+| planner.enable_multiphase_agg  | SYSTEM | true     |
+| planner.enable_streamagg       | SYSTEM | true     |
+| planner.enable_hashjoin        | SYSTEM | true     |
+|--------------------------------|--------|----------|
 9 rows selected (0.159 seconds)  
-</code></pre></div>
-<p>Issuing the ALTER SYSTEM RESET command resets the option back to the default system value (false):  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; ALTER SYSTEM RESET `planner.add_producer_consumer`;  
-</code></pre></div>
-<p>Issuing the ALTER SYSTEM RESET ALL command resets all options back to their default system values:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; ALTER SYSTEM RESET ALL;
-</code></pre></div>
+</code></pre></div></div>
+
+<p>Issuing the ALTER SYSTEM RESET command resets the option back to the default system value (false):</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; ALTER SYSTEM RESET `planner.add_producer_consumer`;  
+</code></pre></div></div>
+
+<p>Issuing the ALTER SYSTEM RESET ALL command resets all options back to their default system values:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; ALTER SYSTEM RESET ALL;
+</code></pre></div></div>
+
+
     
       
         <div class="doc-nav">
@@ -1447,7 +1492,7 @@ Note that the option type is case-sensitive.</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/analyze-table-compute-statistics/index.html b/docs/analyze-table-compute-statistics/index.html
index eaa014a..fbf402d 100644
--- a/docs/analyze-table-compute-statistics/index.html
+++ b/docs/analyze-table-compute-statistics/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,313 +1369,356 @@
 
     </div>
 
-     May 31, 2019
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
         <p>Drill 1.16 and later supports the ANALYZE TABLE COMPUTE STATISTICS statement. The ANALYZE TABLE COMPUTE STATISTICS statement computes statistics on Parquet data stored in tables and directories. The optimizer in Drill uses statistics to estimate filter, aggregation, and join cardinalities and create an optimal query plan. 
-ANALYZE TABLE COMPUTE STATISTICS writes statistics to a JSON file in the <code>.stats.drill</code> directory, for example <code>/user/table1/.stats.drill/0_0.json</code>. </p>
+ANALYZE TABLE COMPUTE STATISTICS writes statistics to a JSON file in the <code class="language-plaintext highlighter-rouge">.stats.drill</code> directory, for example <code class="language-plaintext highlighter-rouge">/user/table1/.stats.drill/0_0.json</code>.</p>
 
-<p>Drill will not use the statistics for query planning unless you enable the <code>planner.statistics.use</code> option, as shown:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SET `planner.statistics.use` = true;
-</code></pre></div>
-<p>Alternatively, you can enable the option in the Drill Web UI at <code>http://&lt;drill-hostname-or-ip-address&gt;:8047/options</code>.</p>
+<p>Drill will not use the statistics for query planning unless you enable the <code class="language-plaintext highlighter-rouge">planner.statistics.use</code> option, as shown:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SET `planner.statistics.use` = true;
+</code></pre></div></div>
+
+<p>Alternatively, you can enable the option in the Drill Web UI at <code class="language-plaintext highlighter-rouge">http://&lt;drill-hostname-or-ip-address&gt;:8047/options</code>.</p>
 
 <h2 id="syntax">Syntax</h2>
 
-<p>The ANALYZE TABLE COMPUTE STATISTICS statement supports the following syntax:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ANALYZE TABLE [workspace.]table_name COMPUTE STATISTICS [(column1, column2,...)] [SAMPLE number PERCENT]
-</code></pre></div>
+<p>The ANALYZE TABLE COMPUTE STATISTICS statement supports the following syntax:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANALYZE TABLE [workspace.]table_name COMPUTE STATISTICS [(column1, column2,...)] [SAMPLE number PERCENT]
+</code></pre></div></div>
+
 <h2 id="parameters">Parameters</h2>
 
-<p><em>workspace</em><br>
-Optional. A configured storage plugin and workspace, like <code>dfs.samples</code>. For example, in <code>dfs.samples</code>, <code>dfs</code> is the file system storage plugin and samples is the <code>workspace</code> configured to point to a directory on the file system. </p>
+<p><em>workspace</em><br />
+Optional. A configured storage plugin and workspace, like <code class="language-plaintext highlighter-rouge">dfs.samples</code>. For example, in <code class="language-plaintext highlighter-rouge">dfs.samples</code>, <code class="language-plaintext highlighter-rouge">dfs</code> is the file system storage plugin and samples is the <code class="language-plaintext highlighter-rouge">workspace</code> configured to point to a directory on the file system.</p>
 
-<p><em>table_name</em><br>
-The name of the table or directory for which Drill will generate statistics. </p>
+<p><em>table_name</em><br />
+The name of the table or directory for which Drill will generate statistics.</p>
 
-<p><em>COMPUTE STATISTICS</em><br>
-Generates statistics for the table, columns, or directory specified.   </p>
+<p><em>COMPUTE STATISTICS</em><br />
+Generates statistics for the table, columns, or directory specified.</p>
 
-<p><em>column</em><br>
-The name of the column(s) for which Drill will generate statistics.  </p>
+<p><em>column</em><br />
+The name of the column(s) for which Drill will generate statistics.</p>
 
-<p><em>SAMPLE</em><br>
+<p><em>SAMPLE</em>  <br />
 Optional. Indicates that compute statistics should run on a subset of the data.</p>
 
-<p><em>number PERCENT</em><br>
-An integer that specifies the percentage of data on which to compute statistics. For example, if a table has 100 rows, <code>SAMPLE 50 PERCENT</code> indicates that statistics should be computed on 50 rows. The optimizer selects the rows at random. </p>
+<p><em>number PERCENT</em><br />
+An integer that specifies the percentage of data on which to compute statistics. For example, if a table has 100 rows, <code class="language-plaintext highlighter-rouge">SAMPLE 50 PERCENT</code> indicates that statistics should be computed on 50 rows. The optimizer selects the rows at random.</p>
 
 <h2 id="related-command">Related Command</h2>
 
-<p>If you drop a table that you have already run ANALYZE TABLE COMPUTE STATISTICS against, the statistics are automatically removed with the table:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">DROP TABLE [IF EXISTS] [workspace.]name  
-</code></pre></div>
-<p>To remove statistics for a table you want to keep, you must remove the directory in which Drill stores the statistics:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">DROP TABLE [IF EXISTS] [workspace.]name/.stats.drill  
-</code></pre></div>
-<p>If you have already issued the ANALYZE TABLE COMPUTE STATISTICS statement against specific columns, table, or directory, you must run the DROP TABLE statement with <code>/.stats.drill</code> before you can successfully run the ANALYZE TABLE COMPUTE STATISTICS statement against the data source again, for example:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">DROP TABLE `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
-</code></pre></div>
-<p>Note that <code>/.stats.drill</code> is the directory to which the JSON file with statistics is written.   </p>
+<p>If you drop a table that you have already run ANALYZE TABLE COMPUTE STATISTICS against, the statistics are automatically removed with the table:</p>
 
-<h2 id="usage-notes">Usage Notes</h2>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>DROP TABLE [IF EXISTS] [workspace.]name  
+</code></pre></div></div>
 
-<ul>
-<li>The ANALYZE TABLE COMPUTE STATISTICS statement can compute statistics for Parquet data stored in tables, columns, and directories within dfs storage plugins only.<br></li>
-<li>The user running the ANALYZE TABLE COMPUTE STATISTICS statement must have read and write permissions on the data source.<br></li>
-<li><p>The optimizer in Drill computes the following types of statistics for each column:  </p>
+<p>To remove statistics for a table you want to keep, you must remove the directory in which Drill stores the statistics:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>DROP TABLE [IF EXISTS] [workspace.]name/.stats.drill  
+</code></pre></div></div>
+
+<p>If you have already issued the ANALYZE TABLE COMPUTE STATISTICS statement against specific columns, table, or directory, you must run the DROP TABLE statement with <code class="language-plaintext highlighter-rouge">/.stats.drill</code> before you can successfully run the ANALYZE TABLE COMPUTE STATISTICS statement against the data source again, for example:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>DROP TABLE `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
+</code></pre></div></div>
+
+<p>Note that <code class="language-plaintext highlighter-rouge">/.stats.drill</code> is the directory to which the JSON file with statistics is written.</p>
+
+<h2 id="usage-notes">Usage Notes</h2>
 
 <ul>
-<li>Rowcount (total number of entries in the table)<br></li>
-<li>Nonnullrowcount (total number of non-null entries in the table)<br></li>
-<li>NDV (total distinct values in the table)<br></li>
-<li>Avgwidth (average width, in bytes, of a column)<br></li>
-<li>Majortype (data type and data mode (OPTIONAL, REQUIRED, REPEATED) of the column values)<br></li>
-<li>Histogram (represents the frequency distribution of values (numeric data) in a column) See <a href="/docs/analyze-table-compute-statistics/#histograms">Histograms</a>.<br></li>
-<li><p>When you look at the statistics file, statistics for each column display in the following format (c_nationkey is used as an example column):  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">{&quot;column&quot;:&quot;`c_nationkey`&quot;,&quot;majortype&quot;:{&quot;type&quot;:&quot;INT&quot;,&quot;mode&quot;:&quot;REQUIRED&quot;},&quot;schema&quot;:1.0,&quot;rowcount&quot;:1500.0,&quot;nonnullrowcount&quot;:1500.0,&quot;ndv&quot;:25,&quot;avgwidth&quot;:4.0,&quot;histogram&quot;:{&quot;category&quot;:&quot;numeric-equi-depth&quot;,&quot;numRowsPerBucket&quot;:150,&quot;buckets&quot;:[0.0,2.0,4.0,7.0,9.0 [...]
-</code></pre></div></li>
-</ul></li>
-<li><p>ANALYZE TABLE COMPUTE STATISTICS can compute statistics on nested scalar columns; however, you must explicitly state the columns, for example:<br>
-     <code>ANALYZE TABLE employee_table COMPUTE STATISTICS (name.firstname, name.lastname);</code>  </p></li>
-<li><p>ANALYZE TABLE COMPUTE STATISTICS can compute statistics at the root directory level, but not at the partition level. 
-Drill does not compute statistics for complex types (maps, arrays).</p></li>
+  <li>The ANALYZE TABLE COMPUTE STATISTICS statement can compute statistics for Parquet data stored in tables, columns, and directories within dfs storage plugins only.</li>
+  <li>The user running the ANALYZE TABLE COMPUTE STATISTICS statement must have read and write permissions on the data source.</li>
+  <li>The optimizer in Drill computes the following types of statistics for each column:
+    <ul>
+      <li>Rowcount (total number of entries in the table)</li>
+      <li>Nonnullrowcount (total number of non-null entries in the table)</li>
+      <li>NDV (total distinct values in the table)</li>
+      <li>Avgwidth (average width, in bytes, of a column)</li>
+      <li>Majortype (data type and data mode (OPTIONAL, REQUIRED, REPEATED) of the column values)</li>
+      <li>Histogram (represents the frequency distribution of values (numeric data) in a column) See <a href="/docs/analyze-table-compute-statistics/#histograms">Histograms</a>.</li>
+      <li>
+        <p>When you look at the statistics file, statistics for each column display in the following format (c_nationkey is used as an example column):</p>
+
+        <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>  {"column":"`c_nationkey`","majortype":{"type":"INT","mode":"REQUIRED"},"schema":1.0,"rowcount":1500.0,"nonnullrowcount":1500.0,"ndv":25,"avgwidth":4.0,"histogram":{"category":"numeric-equi-depth","numRowsPerBucket":150,"buckets":[0.0,2.0,4.0,7.0,9.0,12.0,15.199999999999978,17.0,19.0,22.0,24.0]}}  
+</code></pre></div>        </div>
+      </li>
+    </ul>
+  </li>
+  <li>ANALYZE TABLE COMPUTE STATISTICS can compute statistics on nested scalar columns; however, you must explicitly state the columns, for example:  <br />
+       <code class="language-plaintext highlighter-rouge">ANALYZE TABLE employee_table COMPUTE STATISTICS (name.firstname, name.lastname);</code></li>
+  <li>ANALYZE TABLE COMPUTE STATISTICS can compute statistics at the root directory level, but not at the partition level. 
+Drill does not compute statistics for complex types (maps, arrays).</li>
 </ul>
 
 <h2 id="related-options">Related Options</h2>
-
-<p>You can set the following options related to the ANALYZE TABLE COMPUTE STATISTICS statement at the system or session level with the SET (session level) or ALTER SYSTEM SET (system level) statements, or through the Drill Web UI at <code>http://&lt;drill-hostname-or-ip&gt;:8047/options</code>:  </p>
+<p>You can set the following options related to the ANALYZE TABLE COMPUTE STATISTICS statement at the system or session level with the SET (session level) or ALTER SYSTEM SET (system level) statements, or through the Drill Web UI at <code class="language-plaintext highlighter-rouge">http://&lt;drill-hostname-or-ip&gt;:8047/options</code>:</p>
 
 <ul>
-<li><strong>planner.statistics.use</strong><br>
-Enables the query planner to use statistics. When disabled, ANALYZE TABLE COMPUTE STATISTICS generates statistics, but the query planner will not use the statistics unless this option is enabled. Disabled (false) by default. </li>
-<li><strong>exec.statistics.ndv_accuracy</strong><br>
-Controls the trade-off between NDV statistic computation memory cost and accuracy. Controls the amount of memory for estimates. More memory produces more accurate estimates. The default value should suffice for most scenarios. Default is 20. Range is 0- 30.<br></li>
-<li><strong>exec.statistics.ndv_extrapolation_bf_elements</strong><br>
-Controls the trade-off between NDV statistics computation memory cost and sampling extrapolation accuracy. Relates specifically to SAMPLE. The default value should suffice for most scenarios. Increasing the value requires additional memory. Default is 1000000.<br></li>
-<li><strong>exec.statistics.ndv_extrapolation_bf_fpprobability</strong><br>
-Controls the trade-off between NDV statistics computation memory cost and sampling extrapolation accuracy. Controls the overall accuracy of statistics when using sampling. Default is 10 percent. Range is 0-100.<br></li>
-<li><strong>exec.statistics.deterministic_sampling</strong><br>
-Turns deterministic sampling on and off. Relates specifically to SAMPLE. Default is false.<br></li>
-<li><strong>exec.statistics.tdigest_compression</strong><br>
-Controls the &#39;compression&#39; factor for the TDigest algorithm used for histogram statistics. Controls trade-off between t-digest quantile statistic storage cost and accuracy. Higher values use more groups (clusters) for the t-digest and improve accuracy at the expense of extra storage. Positive integer values in the range [1, 10000].  Default is 100.<br></li>
+  <li><strong>planner.statistics.use</strong><br />
+Enables the query planner to use statistics. When disabled, ANALYZE TABLE COMPUTE STATISTICS generates statistics, but the query planner will not use the statistics unless this option is enabled. Disabled (false) by default.</li>
+  <li><strong>exec.statistics.ndv_accuracy</strong><br />
+Controls the trade-off between NDV statistic computation memory cost and accuracy. Controls the amount of memory for estimates. More memory produces more accurate estimates. The default value should suffice for most scenarios. Default is 20. Range is 0-30.</li>
+  <li><strong>exec.statistics.ndv_extrapolation_bf_elements</strong><br />
+Controls the trade-off between NDV statistics computation memory cost and sampling extrapolation accuracy. Relates specifically to SAMPLE. The default value should suffice for most scenarios. Increasing the value requires additional memory. Default is 1000000.</li>
+  <li><strong>exec.statistics.ndv_extrapolation_bf_fpprobability</strong><br />
+Controls the trade-off between NDV statistics computation memory cost and sampling extrapolation accuracy. Controls the overall accuracy of statistics when using sampling. Default is 10 percent. Range is 0-100.</li>
+  <li><strong>exec.statistics.deterministic_sampling</strong><br />
+Turns deterministic sampling on and off. Relates specifically to SAMPLE. Default is false.</li>
+  <li><strong>exec.statistics.tdigest_compression</strong><br />
+Controls the ‘compression’ factor for the TDigest algorithm used for histogram statistics. Controls trade-off between t-digest quantile statistic storage cost and accuracy. Higher values use more groups (clusters) for the t-digest and improve accuracy at the expense of extra storage. Positive integer values in the range [1, 10000].  Default is 100.</li>
 </ul>
 
 <h2 id="reserved-keywords">Reserved Keywords</h2>
 
-<p>The ANALYZE TABLE COMPUTE STATISTICS statement introduces the following reserved keywords:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">Analyze  
+<p>The ANALYZE TABLE COMPUTE STATISTICS statement introduces the following reserved keywords:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>Analyze  
 Compute  
 Estimate  
 Statistics  
 Sample  
-</code></pre></div>
-<p>If you use any of these words in a Drill query, you must enclose the word in backticks. For example, if you query a table named “estimate,” you would enclose the word &quot;estimate&quot; in backticks, as shown:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT * FROM `estimate`;
-</code></pre></div>
+</code></pre></div></div>
+
+<p>If you use any of these words in a Drill query, you must enclose the word in backticks. For example, if you query a table named “estimate,” you would enclose the word “estimate” in backticks, as shown:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SELECT * FROM `estimate`;
+</code></pre></div></div>
+
 <h2 id="analyze-table-compute-statistics-performance">ANALYZE TABLE COMPUTE STATISTICS Performance</h2>
 
 <ul>
-<li>After you run the ANALYZE TABLE COMPUTE STATISTICS statement, you can view the profile for ANALYZE in the Drill Web UI. Go to <code>http://&lt;drill-hostname-or-ip&gt;:8047/profiles</code>, and click the ANALYZE TABLE statement for which you want to view the profile.<br></li>
-<li>Should you notice any performance issues, you may want to decrease the value of the <code>planner.slice_target</code> option.<br></li>
-<li>Generating statistics on large data sets can consume time and resources, such as memory and CPU. ANALYZE TABLE COMPUTE STATISTICS can compute statistics on a sample (subset of the data indicated as a percentage) to limit the amount of resources needed for computation. Drill still scans the entire data set, but only computes on the rows selected for sampling. Rows are randomly selected for the sample. Note that the quality of statistics increases with the sample size.<br></li>
+  <li>After you run the ANALYZE TABLE COMPUTE STATISTICS statement, you can view the profile for ANALYZE in the Drill Web UI. Go to <code class="language-plaintext highlighter-rouge">http://&lt;drill-hostname-or-ip&gt;:8047/profiles</code>, and click the ANALYZE TABLE statement for which you want to view the profile.</li>
+  <li>Should you notice any performance issues, you may want to decrease the value of the <code class="language-plaintext highlighter-rouge">planner.slice_target</code> option.</li>
+  <li>Generating statistics on large data sets can consume time and resources, such as memory and CPU. ANALYZE TABLE COMPUTE STATISTICS can compute statistics on a sample (subset of the data indicated as a percentage) to limit the amount of resources needed for computation. Drill still scans the entire data set, but only computes on the rows selected for sampling. Rows are randomly selected for the sample. Note that the quality of statistics increases with the sample size.</li>
 </ul>
 
 <h2 id="queries-that-benefit-from-statistics">Queries that Benefit from Statistics</h2>
-
 <p>Typically, the types of queries that benefit from statistics are those that include:</p>
 
 <ul>
-<li>Grouping<br></li>
-<li>Multi-table joins<br></li>
-<li>Equality predicates on scalar columns<br></li>
-<li>Range predicates (filters) on numeric columns</li>
+  <li>Grouping</li>
+  <li>Multi-table joins</li>
+  <li>Equality predicates on scalar columns</li>
+  <li>Range predicates (filters) on numeric columns</li>
 </ul>
 
 <h2 id="histograms">Histograms</h2>
 
-<p><strong>Note:</strong> Currently, histograms are supported for numeric columns only.  </p>
+<p><strong>Note:</strong> Currently, histograms are supported for numeric columns only.</p>
+
+<p>Histograms show the distribution of data to determine if data is skewed or normally distributed. Histogram statistics improve the selectivity estimates used by the optimizer to create the most efficient query plans possible. Histogram statistics are useful for range predicates to help determine how many rows belong to a particular range.</p>
 
-<p>Histograms show the distribution of data to determine if data is skewed or normally distributed. Histogram statistics improve the selectivity estimates used by the optimizer to create the most efficient query plans possible. Histogram statistics are useful for range predicates to help determine how many rows belong to a particular range.   </p>
+<p>Running the ANALYZE TABLE COMPUTE STATISTICS statement generates equi-depth histogram statistics on each column in a table. Equi-depth histograms distribute distinct column values across buckets of varying widths, with all buckets having approximately the same number of rows. The fixed number of rows per bucket is predetermined by <code class="language-plaintext highlighter-rouge">ceil(number_rows/n)</code>, where <code class="language-plaintext highlighter-rouge">n</code> is the numb [...]
 
-<p>Running the ANALYZE TABLE COMPUTE STATISTICS statement generates equi-depth histogram statistics on each column in a table. Equi-depth histograms distribute distinct column values across buckets of varying widths, with all buckets having approximately the same number of rows. The fixed number of rows per bucket is predetermined by <code>ceil(number_rows/n)</code>, where <code>n</code> is the number of buckets. The number of distinct values in each bucket depends on the distribution of [...]
+<p>The following diagram shows the column values on the horizontal axis and the individual frequencies (dark blue) and total frequency of a bucket (light blue). In this example, the total number of rows = 64, hence the number of rows per bucket = <code class="language-plaintext highlighter-rouge">ceil(64/4)  = 16</code>.</p>
 
-<p>The following diagram shows the column values on the horizontal axis and the individual frequencies (dark blue) and total frequency of a bucket (light blue). In this example, the total number of rows = 64, hence the number of rows per bucket = <code>ceil(64/4)  = 16</code>.  </p>
+<p><img src="https://i.imgur.com/imchEyg.png" alt="" /></p>
 
-<p><img src="https://i.imgur.com/imchEyg.png" alt="">  </p>
+<p>The following steps are used to determine bucket boundaries:</p>
+<ol>
+  <li>Determine the number of rows per bucket: ceil(N/m) where m = num buckets.</li>
+  <li>Sort the data on the column.</li>
+  <li>Determine bucket boundaries: The start of bucket 0  = min(column), then continue adding individual frequencies until the row limit is reached, which is the end point of the bucket. Continue to the next bucket and repeat the process. The same column value can potentially be at the end point of one bucket and the start point of the next bucket. Also, the last bucket could have slightly fewer values than other buckets.</li>
+</ol>
 
-<p>The following steps are used to determine bucket boundaries:<br>
-1. Determine the number of rows per bucket: ceil(N/m) where m = num buckets.<br>
-2. Sort the data on the column.<br>
-3. Determine bucket boundaries: The start of bucket 0  = min(column), then continue adding individual frequencies until the row limit is reached, which is the end point of the bucket. Continue to the next bucket and repeat the process. The same column value can potentially be at the end point of one bucket and the start point of the next bucket. Also, the last bucket could have slightly fewer values than other buckets.  </p>
+<p>For the predicate <code class="language-plaintext highlighter-rouge">"WHERE a = 5"</code>, in the example histogram above, you can see that 5 is in the first bucket, which has a range of [1, 7]. Using the ‘continuous variable’ nature of histograms, and assuming a uniform distribution within a bucket, we get 16/7 = 2 (approximately).  This is closer to the actual value of 1.</p>
 
-<p>For the predicate <code>&quot;WHERE a = 5&quot;</code>, in the example histogram above, you can see that 5 is in the first bucket, which has a range of [1, 7], Using the ‘continuous variable’ nature of histograms, and assuming a uniform distribution within a bucket, we get 16/7 = 2 (approximately).  This is closer to the actual value of 1.</p>
+<p>Next, consider the range predicate <code class="language-plaintext highlighter-rouge">"WHERE a &gt; 5 AND a &lt;= 16"</code>.  The range spans part of bucket [1, 7] and entire buckets [8, 9], [10, 11] and [12, 16].  The total estimate = (7-5)/7 * 16 + 16 + 16 + 16 = 53 (approximately).  The actual count is 59.</p>
 
-<p>Next, consider the range predicate <code>&quot;WHERE a &gt; 5 AND a &lt;= 16&quot;</code>.  The range spans part of bucket [1, 7] and entire buckets [8, 9], [10, 11] and [12, 16].  The total estimate = (7-5)/7 * 16 + 16 + 16 + 16 = 53 (approximately).  The actual count is 59.</p>
+<p><strong>Viewing Histogram Statistics for a Column</strong><br />
+Histogram statistics are generated for each column, as shown:</p>
 
-<p><strong>Viewing Histogram Statistics for a Column</strong><br>
-Histogram statistics are generated for each column, as shown:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">qhistogram&quot;:{&quot;category&quot;:&quot;numeric-equi-depth&quot;,&quot;numRowsPerBucket&quot;:150,&quot;buckets&quot;:[0.0,2.0,4.0,7.0,9.0,12.0,15.199999999999978,17.0,19.0,22.0,24.0]
-</code></pre></div>
-<p>In this example, there are 10 buckets. Each bucket contains 150 rows, which is calculated as the number of rows (1500)/number of buckets (10). The list of numbers for the “buckets” property indicates bucket boundaries, with the first bucket starting at 0.0 and ending at 2.0. The end of the first bucket is the start point for the second bucket, such that the second bucket starts at 2.0 and ends at 4.0, and so on for the remainder of the buckets. </p>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>qhistogram":{"category":"numeric-equi-depth","numRowsPerBucket":150,"buckets":[0.0,2.0,4.0,7.0,9.0,12.0,15.199999999999978,17.0,19.0,22.0,24.0]
+</code></pre></div></div>
+
+<p>In this example, there are 10 buckets. Each bucket contains 150 rows, which is calculated as the number of rows (1500)/number of buckets (10). The list of numbers for the “buckets” property indicates bucket boundaries, with the first bucket starting at 0.0 and ending at 2.0. The end of the first bucket is the start point for the second bucket, such that the second bucket starts at 2.0 and ends at 4.0, and so on for the remainder of the buckets.</p>
 
 <h2 id="limitations">Limitations</h2>
 
 <ul>
-<li>Drill does not cache statistics. </li>
-<li>ANALYZE TABLE COMPUTE STATISTICS runs only on directory-based Parquet tables. </li>
-<li>ANALYZE TABLE COMPUTE STATISTICS cannot do the following:<br>
+  <li>Drill does not cache statistics.</li>
+  <li>ANALYZE TABLE COMPUTE STATISTICS runs only on directory-based Parquet tables.</li>
+  <li>ANALYZE TABLE COMPUTE STATISTICS cannot do the following:
+    <ul>
+      <li>compute statistics on schema-less file formats, such as text and CSV</li>
+      <li>provide up-to-date statistics for operational data due to potential mismatches that can occur between operational updates and manually running ANALYZE TABLE</li>
+    </ul>
+  </li>
+  <li>
+    <p>Running the ANALYZE TABLE COMPUTE STATISTICS statement against multiple files in which some of the files have null values and others have no null values may return the following generic Drill error, which is not specific to the ANALYZE command:</p>
 
-<ul>
-<li>compute statistics on schema-less file formats, such as text and CSV</li>
-<li>provide up-to-date statistics for operational data due to potential mismatches that can occur between operational updates and manually running ANALYZE TABLE<br></li>
-</ul></li>
-<li><p>Running the ANALYZE TABLE COMPUTE STATISTICS statement against multiple files in which some of the files have null values and others have no null values may return the following generic Drill error, which is not specific to the ANALYZE command:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">Error: SYSTEM ERROR: IllegalStateException: Failure while reading vector. 
- Expected vector class of org.apache.drill.exec.vector.NullableBigIntVector
-but was holding vector class org.apache.drill.exec.vector.IntVector, field= [`o_custkey` (INT:REQUIRED)] 
-
-//If you encounter this error, run the ANALYZE TABLE COMPUTE STATISTICS statement on each file with null values individually instead of running the statement against all the files at once.  
-</code></pre></div></li>
-<li><p>Running the ANALYZE TABLE COMPUTE STATISTICS statement creates the stats file, which changes the directory timestamp. The change of the timestamp automatically  triggers the REFRESH TABLE METADATA command, even when the underlying data has not changed.  </p></li>
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>  Error: SYSTEM ERROR: IllegalStateException: Failure while reading vector. 
+   Expected vector class of org.apache.drill.exec.vector.NullableBigIntVector
+  but was holding vector class org.apache.drill.exec.vector.IntVector, field= [`o_custkey` (INT:REQUIRED)] 
+ 
+  //If you encounter this error, run the ANALYZE TABLE COMPUTE STATISTICS statement on each file with null values individually instead of running the statement against all the files at once.  
+</code></pre></div>    </div>
+  </li>
+  <li>Running the ANALYZE TABLE COMPUTE STATISTICS statement creates the stats file, which changes the directory timestamp. The change of the timestamp automatically  triggers the REFRESH TABLE METADATA command, even when the underlying data has not changed.</li>
 </ul>
 
 <h2 id="examples">EXAMPLES</h2>
 
-<p>These examples use a schema, <code>dfs.drilltestdir</code>, which points to the <code>/drill/testdata</code> directory in the MapR File System. The <code>/drill/testdata</code> directory has the following subdirectories: </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">/drill/testdata/table_stats/Tpch0.01/parquet
-</code></pre></div>
-<p>The <code>/parquet</code>directory contains a table named “customer.”</p>
+<p>These examples use a schema, <code class="language-plaintext highlighter-rouge">dfs.drilltestdir</code>, which points to the <code class="language-plaintext highlighter-rouge">/drill/testdata</code> directory in the MapR File System. The <code class="language-plaintext highlighter-rouge">/drill/testdata</code> directory has the following subdirectories:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>/drill/testdata/table_stats/Tpch0.01/parquet
+</code></pre></div></div>
+
+<p>The <code class="language-plaintext highlighter-rouge">/parquet</code> directory contains a table named “customer.”</p>
+
+<p>Switch schema to <code class="language-plaintext highlighter-rouge">dfs.drilltestdir</code>:</p>
 
-<p>Switch schema to <code>dfs.drilltestdir</code>:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">use dfs.drilltestdir;
-+------+----------------------------------------------+
-|  ok  |                summary                       |
-+------+----------------------------------------------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>use dfs.drilltestdir;
+|------|----------------------------------------------|
+|  ok  |               	summary                	      |
+|------|----------------------------------------------|
 | true | Default schema changed to [dfs.drilltestdir] |
-+------+----------------------------------------------+
-</code></pre></div>
-<p>The following query shows the columns and types of data in the “customer” table:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">apache drill (dfs.drilltestdir)&gt; select * from `table_stats/Tpch0.01/parquet/customer` limit 2;
-+-----------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+-----------------------------------------------------------------+
+|------|----------------------------------------------|
+</code></pre></div></div>
+
+<p>The following query shows the columns and types of data in the “customer” table:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>apache drill (dfs.drilltestdir)&gt; select * from `table_stats/Tpch0.01/parquet/customer` limit 2;
+|-----------|--------------------|--------------------------------|-------------|-----------------|-----------|--------------|-----------------------------------------------------------------|
 | c_custkey |       c_name       |           c_address            | c_nationkey |     c_phone     | c_acctbal | c_mktsegment |                            c_comment                            |
-+-----------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+-----------------------------------------------------------------+
+|-----------|--------------------|--------------------------------|-------------|-----------------|-----------|--------------|-----------------------------------------------------------------|
 | 1         | Customer#000000001 | IVhzIApeRb ot,c,E              | 15          | 25-989-741-2988 | 711.56    | BUILDING     | to the even, regular platelets. regular, ironic epitaphs nag e  |
 | 2         | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13          | 23-768-687-3665 | 121.65    | AUTOMOBILE   | l accounts. blithely ironic theodolites integrate boldly: caref |
-+-----------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+-----------------------------------------------------------------+
-</code></pre></div>
-<h3 id="enabling-statistics-for-query-planning">Enabling Statistics for Query Planning</h3>
+|-----------|--------------------|--------------------------------|-------------|-----------------|-----------|--------------|-----------------------------------------------------------------|
+</code></pre></div></div>
 
+<h3 id="enabling-statistics-for-query-planning">Enabling Statistics for Query Planning</h3>
 <p>You can run the ANALYZE TABLE COMPUTE STATISTICS statement at any time to compute statistics; however, you must enable the following option if you want Drill to use statistics during query planning:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">set `planner.statistics.use`=true;
-+------+---------------------------------+
-|  ok  |             summary             |
-+------+---------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>set `planner.statistics.use`=true;
+|------|---------------------------------|
+|  ok  |             summary         	 |
+|------|---------------------------------|
 | true | planner.statistics.use updated. |
-+------+---------------------------------+
-</code></pre></div>
-<h3 id="computing-statistics">Computing Statistics</h3>
+|------|---------------------------------|
+</code></pre></div></div>
 
+<h3 id="computing-statistics">Computing Statistics</h3>
 <p>You can compute statistics on directories with Parquet data or on Parquet tables.</p>
 
 <p>You can run the ANALYZE TABLE COMPUTE STATISTICS statement on a subset of columns to generate statistics for those columns only, as shown:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">analyze table `table_stats/Tpch0.01/parquet/customer` compute statistics (c_custkey, c_nationkey, c_acctbal);
-+----------+---------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>analyze table `table_stats/Tpch0.01/parquet/customer` compute statistics (c_custkey, c_nationkey, c_acctbal);
+|----------|---------------------------|
 | Fragment | Number of records written |
-+----------+---------------------------+
+|----------|---------------------------|
 | 0_0      | 3                         |
-+----------+---------------------------+
-</code></pre></div>
+|----------|---------------------------|
+</code></pre></div></div>
+
 <p>Or, you can run the ANALYZE TABLE COMPUTE STATISTICS statement on the entire table/directory if you want statistics generated for all the columns:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">analyze table `table_stats/Tpch0.01/parquet/customer` compute statistics;
-+----------+---------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>analyze table `table_stats/Tpch0.01/parquet/customer` compute statistics;
+|----------|---------------------------|
 | Fragment | Number of records written |
-+----------+---------------------------+
+|----------|---------------------------|
 | 0_0      | 8                         |
-+----------+---------------------------+
-</code></pre></div>
-<h3 id="computing-statistics-on-a-sample">Computing Statistics on a SAMPLE</h3>
+|----------|---------------------------|
+</code></pre></div></div>
 
+<h3 id="computing-statistics-on-a-sample">Computing Statistics on a SAMPLE</h3>
 <p>You can also run ANALYZE TABLE COMPUTE STATISTICS on a percentage of the data using the SAMPLE command, as shown:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ANALYZE TABLE `table_stats/Tpch0.01/parquet/customer` COMPUTE STATISTICS SAMPLE 50 PERCENT;
-+----------+---------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANALYZE TABLE `table_stats/Tpch0.01/parquet/customer` COMPUTE STATISTICS SAMPLE 50 PERCENT;
+|----------|---------------------------|
 | Fragment | Number of records written |
-+----------+---------------------------+
+|----------|---------------------------|
 | 0_0      | 8                         |
-+----------+---------------------------+
-</code></pre></div>
-<h3 id="storing-statistics">Storing Statistics</h3>
+|----------|---------------------------|
+</code></pre></div></div>
 
-<p>When you generate statistics, a statistics directory (<code>.stats.drill</code>) is created with a JSON file that contains the statistical data.</p>
+<h3 id="storing-statistics">Storing Statistics</h3>
+<p>When you generate statistics, a statistics directory (<code class="language-plaintext highlighter-rouge">.stats.drill</code>) is created with a JSON file that contains the statistical data.</p>
 
-<p>For tables, the <code>.stats.drill</code> directory is nested within the table directory. For example, if you ran ANALYZE TABLE COMPUTE STATISTICS against a table named “customer,” you could access the statistic file in <code>/customer/.stats.drill</code>. The JSON file is stored in the <code>.stats.drill</code> directory.</p>
+<p>For tables, the <code class="language-plaintext highlighter-rouge">.stats.drill</code> directory is nested within the table directory. For example, if you ran ANALYZE TABLE COMPUTE STATISTICS against a table named “customer,” you could access the statistic file in <code class="language-plaintext highlighter-rouge">/customer/.stats.drill</code>. The JSON file is stored in the <code class="language-plaintext highlighter-rouge">.stats.drill</code> directory.</p>
 
-<p>For directories, a new directory is written with the same name as the directory on which you ran ANALYZE TABLE COMPUTE STATISTICS, appended by <code>.stats.drill</code>. For example, if you ran ANALYZE TABLE COMPUTE STATISTICS against a directory named “customer,” you could access the JSON statistics file in the new <code>customer.stats.drill</code> directory.</p>
+<p>For directories, a new directory is written with the same name as the directory on which you ran ANALYZE TABLE COMPUTE STATISTICS, appended by <code class="language-plaintext highlighter-rouge">.stats.drill</code>. For example, if you ran ANALYZE TABLE COMPUTE STATISTICS against a directory named “customer,” you could access the JSON statistics file in the new <code class="language-plaintext highlighter-rouge">customer.stats.drill</code> directory.</p>
 
 <p>You can query the statistics file to see the statistics generated for each column, as shown in the following two examples:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">select * from `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
-+--------------------+----------------------------------------------------------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>select * from `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
+|--------------------|----------------------------------------------------------------------------------|
 | statistics_version |                                   directories                                    |
-+--------------------+----------------------------------------------------------------------------------+
-| v1                 | [{&quot;computed&quot;:&quot;2019-04-30&quot;,&quot;columns&quot;:[{&quot;column&quot;:&quot;`c_custkey`&quot;,&quot;majortype&quot;:{&quot;type&quot;:&quot;INT&quot;,&quot;mode&quot;:&quot;REQUIRED&quot;},&quot;schema&quot;:1.0,&quot;rowcount&quot;:1500.0,&quot;nonnullrowcount&quot;:1500.0,&quot;ndv&quot;:1500,&quot;avgwidth&quot;:4.0,&quot;histogram&quot;:{&quot;category&quot;:&quot;numeric-equi-depth&quot;,&quot;numRowsPerBucket&quot;:150,&quot;buckets&quot;:[2. [...]
-+--------------------+--------------------------------------------------------------------------------------+  
+|--------------------|----------------------------------------------------------------------------------|
+| v1                 | [{"computed":"2019-04-30","columns":[{"column":"`c_custkey`","majortype":{"type":"INT","mode":"REQUIRED"},"schema":1.0,"rowcount":1500.0,"nonnullrowcount":1500.0,"ndv":1500,"avgwidth":4.0,"histogram":{"category":"numeric-equi-depth","numRowsPerBucket":150,"buckets":[2.0,149.0,299.0,450.99999999999994,599.0,749.0,900.9999999999999,1049.0,1199.0,1349.0,1500.0]}},{"column":"`c_name`","majortype":{"type":"VARCHAR","mode":"REQUIRED"},"schema":1.0,"rowcount":1500.0,"nonn [...]
+|--------------------|--------------------------------------------------------------------------------------|  
 
 SELECT t.directories.columns[0].ndv as ndv, t.directories.columns[0].rowcount as rc, t.directories.columns[0].nonnullrowcount AS nnrc, t.directories.columns[0].histogram as histogram FROM `table_stats/Tpch0.01/parquet/customer/.stats.drill` t;
-+------+--------+--------+----------------------------------------------------------------------------------+
+|------|--------|--------|----------------------------------------------------------------------------------|
 | ndv  |   rc   |  nnrc  |                                    histogram                                     |
-+------+--------+--------+----------------------------------------------------------------------------------+
-| 1500 | 1500.0 | 1500.0 | {&quot;category&quot;:&quot;numeric-equi-depth&quot;,&quot;numRowsPerBucket&quot;:150,&quot;buckets&quot;:[2.0,149.0,299.0,450.99999999999994,599.0,749.0,900.9999999999999,1049.0,1199.0,1349.0,1500.0]}             |
-+------+--------+--------+----------------------------------------------------------------------------------+
-</code></pre></div>
-<h3 id="dropping-statistics">Dropping Statistics</h3>
+|------|--------|--------|----------------------------------------------------------------------------------|
+| 1500 | 1500.0 | 1500.0 | {"category":"numeric-equi-depth","numRowsPerBucket":150,"buckets":[2.0,149.0,299.0,450.99999999999994,599.0,749.0,900.9999999999999,1049.0,1199.0,1349.0,1500.0]}             |
+|------|--------|--------|----------------------------------------------------------------------------------|
+</code></pre></div></div>
 
+<h3 id="dropping-statistics">Dropping Statistics</h3>
 <p>If you want to compute statistics on a table or directory that you have already run the ANALYZE TABLE COMPUTE STATISTICS statement against, you must first drop the statistics before you can run ANALYZE TABLE statement on the table again.</p>
 
 <p>The following example demonstrates how to drop statistics on a table:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">DROP TABLE `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
-+------+--------------------------------------------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>DROP TABLE `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
+|------|--------------------------------------------------------------------|
 |  ok  |                              summary                               |
-+------+--------------------------------------------------------------------+
+|------|--------------------------------------------------------------------|
 | true | Table [table_stats/Tpch0.01/parquet/customer/.stats.drill] dropped |
-+------+--------------------------------------------------------------------+
-</code></pre></div>
+|------|--------------------------------------------------------------------|
+</code></pre></div></div>
+
 <p>The following example demonstrates how to drop statistics on a directory, assuming that “customer” is a directory that contains Parquet files:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">DROP TABLE `table_stats/Tpch0.01/parquet/customer.stats.drill`;
-+-------+------------------------------------+
-|  ok   |            summary                 |
-+-------+------------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>DROP TABLE `table_stats/Tpch0.01/parquet/customer.stats.drill`;
+|-------|------------------------------------|
+|  ok   | 	         summary     	         |
+|-------|------------------------------------|
 | true  | Table [customer.stats.drill] dropped|
-+-------+------------------------------------+
-</code></pre></div>
+|-------|------------------------------------|
+</code></pre></div></div>
+
 <p>When you drop statistics, the statistics directory no longer exists for the table:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">select * from `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
 
-Error: VALIDATION ERROR: From line 1, column 15 to line 1, column 66: Object &#39;table_stats/Tpch0.01/parquet/customer/.stats.drill&#39; not found  
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>select * from `table_stats/Tpch0.01/parquet/customer/.stats.drill`;
+
+Error: VALIDATION ERROR: From line 1, column 15 to line 1, column 66: Object 'table_stats/Tpch0.01/parquet/customer/.stats.drill' not found  
 [Error Id: 886003ca-c64f-4e7d-b4c5-26ee1ca617b8 ] (state=,code=0)
-</code></pre></div>
+</code></pre></div></div>
+
 <h2 id="troubleshooting">Troubleshooting</h2>
 
 <p>Typical errors you may get when running ANALYZE TABLE COMPUTE STATISTICS result from running the statement against an individual file or against a data source other than Parquet, as shown in the following examples:</p>
 
-<p><strong>Running ANALYZE TABLE COMPUTE STATISTICS on a file.</strong>  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ANALYZE TABLE `/parquet/nation.parquet` COMPUTE STATISTICS;
-+--------+----------------------------------------------------------------------------------+
+<p><strong>Running ANALYZE TABLE COMPUTE STATISTICS on a file.</strong></p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANALYZE TABLE `/parquet/nation.parquet` COMPUTE STATISTICS;
+|--------|----------------------------------------------------------------------------------|
 |   ok   |                                     summary                                      |
-+--------+----------------------------------------------------------------------------------+
+|--------|----------------------------------------------------------------------------------|
 | false  | Table /parquet/nation.parquet is not supported by ANALYZE. Support is currently limited to directory-based Parquet tables. |
-+--------+----------------------------------------------------------------------------------+
-</code></pre></div>
+|--------|----------------------------------------------------------------------------------|
+</code></pre></div></div>
+
 <p><strong>Running ANALYZE TABLE COMPUTE STATISTICS on a data source other than Parquet.</strong></p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ANALYZE TABLE nation1_json COMPUTE STATISTICS;
-+--------+----------------------------------------------------------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANALYZE TABLE nation1_json COMPUTE STATISTICS;
+|--------|----------------------------------------------------------------------------------|
 |   ok   |                                     summary                                      |
-+--------+----------------------------------------------------------------------------------+
+|--------|----------------------------------------------------------------------------------|
 | false  | Table nation1_json is not supported by ANALYZE. Support is currently limited to directory-based Parquet tables. |
-+--------+----------------------------------------------------------------------------------+
-</code></pre></div>
+|--------|----------------------------------------------------------------------------------|
+</code></pre></div></div>
+
     
       
         <div class="doc-nav">
@@ -1660,7 +1735,7 @@ Error: VALIDATION ERROR: From line 1, column 15 to line 1, column 66: Object &#3
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/analyze-table-refresh-metadata/index.html b/docs/analyze-table-refresh-metadata/index.html
index 49a580b..36878b0 100644
--- a/docs/analyze-table-refresh-metadata/index.html
+++ b/docs/analyze-table-refresh-metadata/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1337,9 +1369,9 @@
 
     </div>
 
-     Mar 17, 2020
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1347,20 +1379,24 @@
 This metadata will be used when querying a table for more optimal plan creation.</p>
 
 <p>To use the Drill Metastore, you must enable it at the session or system level with one of the following commands:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SET `metastore.enabled` = true;
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>SET `metastore.enabled` = true;
 ALTER SYSTEM SET `metastore.enabled` = true;
-</code></pre></div>
+</code></pre></div></div>
+
 <p>Please refer to <a href="/docs/using-drill-metastore">Using Drill Metastore</a> for more details about Drill Metastore including its purpose and how to use it.</p>
 
 <h2 id="syntax">Syntax</h2>
 
 <p>The ANALYZE TABLE REFRESH METADATA statement supports the following syntax:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ANALYZE TABLE [table_name | table({table function name}(parameters))]
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANALYZE TABLE [table_name | table({table function name}(parameters))]
 [COLUMNS {(col1, col2, ...) | NONE}]
-REFRESH METADATA [&#39;level&#39; LEVEL]
+REFRESH METADATA ['level' LEVEL]
 [{COMPUTE | ESTIMATE} | STATISTICS
 [ SAMPLE number PERCENT ]]
-</code></pre></div>
+</code></pre></div></div>
+
 <h2 id="parameters">Parameters</h2>
 
 <p><em>table_name</em>
@@ -1370,19 +1406,21 @@ The name of the table or directory for which Drill will collect table metadata.
 <p><em>table({table function name}(parameters))</em>
 Table function parameters. This syntax is only available since Drill 1.18.
 Example of table function parameters usage:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text"> table(dfs.tmp.`text_nation` (type=&gt;&#39;text&#39;, fieldDelimiter=&gt;&#39;,&#39;, extractHeader=&gt;true,
-    schema=&gt;&#39;inline=(
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> table(dfs.tmp.`text_nation` (type=&gt;'text', fieldDelimiter=&gt;',', extractHeader=&gt;true,
+    schema=&gt;'inline=(
         `n_nationkey` INT not null,
         `n_name` VARCHAR not null,
         `n_regionkey` INT not null,
-        `n_comment` VARCHAR not null)&#39;
+        `n_comment` VARCHAR not null)'
     ))
-</code></pre></div>
+</code></pre></div></div>
+
 <p>Please refer to
  <a href="/docs/plugin-configuration-basics/#specifying-the-schema-as-table-function-parameter">Specifying the Schema as Table Function Parameter</a>
  for the details.</p>
 
-<p><em>COLUMNS (col1, col2, ...)</em>
+<p><em>COLUMNS (col1, col2, …)</em>
 Optional names of the column(s) for which Drill will compute and store statistics. The stored schema will include all
  table columns.</p>
 
@@ -1394,52 +1432,58 @@ Optional VARCHAR literal which specifies maximum level depth for collecting meta
 Possible values:</p>
 
 <ul>
-<li><code>TABLE</code> - metadata will be collected at table level (MIN / MAX column values within whole the table, etc.);</li>
-<li><code>SEGMENT</code> - metadata will be collected for every segment within the table (MIN / MAX column values within a specific
-segment) + metadata at table level. Segment term here is used to abstract data part which may correspond to a
-specific directory on the file system or partition in Hive table;</li>
-<li><code>PARTITION</code> - metadata will be collected for every partition within the table (MIN / MAX column values within a
-specific partition) + file, segment and table metadata. Partition term is used here to abstract part of the data
-where some column(s) have the same values (corresponds to existing Drill partitions for Parquet table). Is not
-implemented in Drill 1.17;</li>
-<li><code>FILE</code> - metadata will be collected for every file within the table (MIN / MAX column values within a specific file) + 
-partition, segment metadata and table metadata;</li>
-<li><code>ROW_GROUP</code> - metadata will be collected for every row group within the table (MIN / MAX column values within a
-specific row group) + file, partition, segment metadata and table metadata. Supported for Parquet tables only;</li>
-<li><code>ALL</code> - metadata will be collected for every splittable table part - row groups for parquet, files for regular file
-storage formats, etc.
-Default is <code>ALL</code>.</li>
+  <li><code class="language-plaintext highlighter-rouge">TABLE</code> - metadata will be collected at table level (MIN / MAX column values within whole the table, etc.);</li>
+  <li><code class="language-plaintext highlighter-rouge">SEGMENT</code> - metadata will be collected for every segment within the table (MIN / MAX column values within a specific
+ segment) + metadata at table level. Segment term here is used to abstract data part which may correspond to a
+ specific directory on the file system or partition in Hive table;</li>
+  <li><code class="language-plaintext highlighter-rouge">PARTITION</code> - metadata will be collected for every partition within the table (MIN / MAX column values within a
+ specific partition) + file, segment and table metadata. Partition term is used here to abstract part of the data
+ where some column(s) have the same values (corresponds to existing Drill partitions for Parquet table). Is not
+ implemented in Drill 1.17;</li>
+  <li><code class="language-plaintext highlighter-rouge">FILE</code> - metadata will be collected for every file within the table (MIN / MAX column values within a specific file) + 
+ partition, segment metadata and table metadata;</li>
+  <li><code class="language-plaintext highlighter-rouge">ROW_GROUP</code> - metadata will be collected for every row group within the table (MIN / MAX column values within a
+ specific row group) + file, partition, segment metadata and table metadata. Supported for Parquet tables only;</li>
+  <li><code class="language-plaintext highlighter-rouge">ALL</code> - metadata will be collected for every splittable table part - row groups for parquet, files for regular file
+ storage formats, etc.
+Default is <code class="language-plaintext highlighter-rouge">ALL</code>.</li>
 </ul>
 
 <p><em>COMPUTE</em>
 Computes statistics for the table to be stored into the Metastore.
-If statistics usage is disabled (<code>planner.statistics.use</code> is set to <code>false</code>), an error will be thrown when this clause is specified.</p>
+If statistics usage is disabled (<code class="language-plaintext highlighter-rouge">planner.statistics.use</code> is set to <code class="language-plaintext highlighter-rouge">false</code>), an error will be thrown when this clause is specified.</p>
 
 <p><em>ESTIMATE</em>
 Computes estimated statistics for the table to be stored into the Metastore. Currently is not supported.</p>
 
-<p><em>(column1, column2, ...)</em>
+<p><em>(column1, column2, …)</em>
 The name of the column(s) for which Drill will compute statistics.</p>
 
 <p><em>SAMPLE</em>
 Optional. Indicates that compute statistics should run on a subset of the data.</p>
 
-<p><em>number PERCENT</em><br>
+<p><em>number PERCENT</em><br />
 An integer that specifies the percentage of data on which to compute statistics. For example, if a table has 100 rows,
- <code>SAMPLE 50 PERCENT</code> indicates that statistics should be computed on 50 rows. The optimizer selects the rows at random. </p>
+ <code class="language-plaintext highlighter-rouge">SAMPLE 50 PERCENT</code> indicates that statistics should be computed on 50 rows. The optimizer selects the rows at random.</p>
 
 <h2 id="related-commands">Related Commands</h2>
 
 <p>Use the following command to remove a table from the Metastore:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ANALYZE TABLE [table_name] DROP [METADATA|STATISTICS] [IF EXISTS]
-</code></pre></div>
-<p>The command will fail if the table does not exist in the Metastore. Include the <code>IF EXISTS</code> clause to ignore a missing table.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ANALYZE TABLE [plugin.schema.]table_name COMPUTE STATISTICS [(column1, column2,...)] [SAMPLE number PERCENT]
-</code></pre></div>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANALYZE TABLE [table_name] DROP [METADATA|STATISTICS] [IF EXISTS]
+</code></pre></div></div>
+
+<p>The command will fail if the table does not exist in the Metastore. Include the <code class="language-plaintext highlighter-rouge">IF EXISTS</code> clause to ignore a missing table.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ANALYZE TABLE [plugin.schema.]table_name COMPUTE STATISTICS [(column1, column2,...)] [SAMPLE number PERCENT]
+</code></pre></div></div>
+
 <p>See <a href="/docs/analyze-table-compute-statistics">ANALYZE TABLE COMPUTE STATISTICS</a>.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">REFRESH TABLE METADATA  [ COLUMNS ( column1, column2...) | NONE ]  table_path
-</code></pre></div>
-<p>For the case when table metadata was stored into the Drill Metastore, Parquet table metadata cache files, wouldn&#39;t be
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>REFRESH TABLE METADATA  [ COLUMNS ( column1, column2...) | NONE ]  table_path
+</code></pre></div></div>
+
+<p>For the case when table metadata was stored into the Drill Metastore, Parquet table metadata cache files, wouldn’t be
  used for the same table during query execution if all required metadata is present and is not outdated.</p>
 
 <h2 id="usage-notes">Usage Notes</h2>
@@ -1462,7 +1506,7 @@ An integer that specifies the percentage of data on which to compute statistics.
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/analyzing-data-using-window-functions/index.html b/docs/analyzing-data-using-window-functions/index.html
index 1c1dc9f..2663aa2 100644
--- a/docs/analyzing-data-using-window-functions/index.html
+++ b/docs/analyzing-data-using-window-functions/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,246 +1367,277 @@
 
     </div>
 
-     Nov 2, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
         <p>This tutorial briefly introduces the analytics in Drill 1.2, namely ANSI SQL-compliant analytic and window functions. Drill supports the following SQL window functions:</p>
 
 <ul>
-<li>PARTITION BY and OVER clauses</li>
-<li>A variety of aggregated window functions for Sum, Max, Min, Count, Avg</li>
-<li>Analytic functions such as First_Value, Last_Value, Lead, Lag, NTile, Row_Number, and Rank</li>
+  <li>PARTITION BY and OVER clauses</li>
+  <li>A variety of aggregated window functions for Sum, Max, Min, Count, Avg</li>
+  <li>Analytic functions such as First_Value, Last_Value, Lead, Lag, NTile, Row_Number, and Rank</li>
 </ul>
 
 <p>Window functions are highly versatile. You can reduce the joins, subqueries, and explicit cursors that you need to write. Window functions solve a variety of use cases with minimal coding effort.</p>
 
-<p>This tutorial builds on previous tutorials, <a href="/docs/analyzing-the-yelp-academic-dataset/">Analyzing the Yelp Academic Dataset</a> and <a href="/docs/analyzing-highly-dynamic-datasets/">Analyzing Highly Dynamic Datasets</a>, and uses the same Yelp dataset. </p>
+<p>This tutorial builds on previous tutorials, <a href="/docs/analyzing-the-yelp-academic-dataset/">Analyzing the Yelp Academic Dataset</a> and <a href="/docs/analyzing-highly-dynamic-datasets/">Analyzing Highly Dynamic Datasets</a>, and uses the same Yelp dataset.</p>
 
-<hr>
+<hr />
 
 <h2 id="getting-started">Getting Started</h2>
 
 <ol>
-<li><p>To get started, download the <a href="http://www.yelp.com/dataset_challenge">Yelp</a> (business reviews) now. </p></li>
-<li><p><a href="/docs/analyzing-the-yelp-academic-dataset/#installing-and-starting-drill">Install and start Drill</a>. </p></li>
-<li><p>List the available schemas in Drill.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SHOW schemas;
-+---------------------+
-|     SCHEMA_NAME     |
-+---------------------+
-| INFORMATION_SCHEMA  |
-| cp.default          |
-| dfs.default         |
-| dfs.root            |
-| dfs.tmp             |
-| dfs.yelp            |
-| sys                 |
-+---------------------+
+  <li>
+    <p>To get started, download the <a href="http://www.yelp.com/dataset_challenge">Yelp</a> (business reviews) now.</p>
+  </li>
+  <li>
+    <p><a href="/docs/analyzing-the-yelp-academic-dataset/#installing-and-starting-drill">Install and start Drill</a>.</p>
+  </li>
+  <li>
+    <p>List the available schemas in Drill.</p>
 
-7 rows selected (1.755 seconds)
-</code></pre></div></li>
-<li><p>Switch to using the workspace in which Yelp data is loaded.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">USE dfs.yelp;
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> SHOW schemas;
+ |---------------------|
+ |     SCHEMA_NAME     |
+ |---------------------|
+ | INFORMATION_SCHEMA  |
+ | cp.default          |
+ | dfs.default         |
+ | dfs.root            |
+ | dfs.tmp             |
+ | dfs.yelp            |
+ | sys                 |
+ |---------------------|
 
-+-------+---------------------------------------+
-|  ok   |                summary                |
-+-------+---------------------------------------+
-| true  | Default schema changed to [dfs.yelp]  |
-+-------+---------------------------------------+
+ 7 rows selected (1.755 seconds)
+</code></pre></div>    </div>
+  </li>
+  <li>
+    <p>Switch to using the workspace in which Yelp data is loaded.</p>
+
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> USE dfs.yelp;
+
+ |-------|---------------------------------------|
+ |  ok   |                summary                |
+ |-------|---------------------------------------|
+ | true  | Default schema changed to [dfs.yelp]  |
+ |-------|---------------------------------------|
+
+ 1 row selected (0.129 seconds)
+</code></pre></div>    </div>
+  </li>
+  <li>
+    <p>Start with exploring one of the datasets available in Yelp dataset - the business information.</p>
 
-1 row selected (0.129 seconds)
-</code></pre></div></li>
-<li><p>Start with exploring one of the datasets available in Yelp dataset - the business information.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT * FROM `business.json` LIMIT 1;
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> SELECT * FROM `business.json` LIMIT 1;
 
-+------------------------+-----------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------+--------------------------------+---------+--------------+-------------------+-------------+-------+-------+-----------+--------------------------------------- [...]
-| business_id            | full_address                                        | hours                                                                                                                                                                                                                                               | open |         categories             |   city  | review_count |        name       |  longitude  | state | stars |  latitude | attributes                             [...]
-+------------------------+--------------+------+-------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------+--------------------------------+---------+--------------+-------------------+-------------+-------+-------+-----------+--------------------------------------- [...]
-| vcNAWiLM4dR7D2nwwJ7nCA | 4840 E Indian School Rd Ste 101 Phoenix, AZ 85018   | {&quot;Tuesday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Friday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Monday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Wednesday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Thursday&quot;:{&quot;close&quot [...]
-+-------------+--------------+-------+------+------------+------+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------+--------------------------------+---------+--------------+-------------------+-------------+-------+-------+-----------+--------------------------------------- [...]
-1 row selected (0.514 seconds)
-</code></pre></div></li>
+ |------------------------|-----------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|--------------------------------|---------|--------------|-------------------|-------------|-------|-------|-----------|-------------------------------------- [...]
+ | business_id            | full_address                                        | hours                                                                                                                                                                                                                                               | open |         categories             |   city  | review_count |        name       |  longitude  | state | stars |  latitude | attributes                            [...]
+ |------------------------|--------------|------|-------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|--------------------------------|---------|--------------|-------------------|-------------|-------|-------|-----------|-------------------------------------- [...]
+ | vcNAWiLM4dR7D2nwwJ7nCA | 4840 E Indian School Rd Ste 101 Phoenix, AZ 85018   | {"Tuesday":{"close":"17:00","open":"08:00"},"Friday":{"close":"17:00","open":"08:00"},"Monday":{"close":"17:00","open":"08:00"},"Wednesday":{"close":"17:00","open":"08:00"},"Thursday":{"close":"17:00","open":"08:00"},"Sunday":{},"Saturday":{}} | true | ["Doctors","Health &amp; Medical"] | Phoenix |      7       | Eric Goldberg, MD | -111.983758 |   AZ  |  3.5  | 33.499313 | {"By Appointment Only":true,"Good [...]
+ |-------------|--------------|-------|------|------------|------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|--------------------------------|---------|--------------|-------------------|-------------|-------|-------|-----------|-------------------------------------- [...]
+ 1 row selected (0.514 seconds)
+</code></pre></div>    </div>
+  </li>
 </ol>
 
-<hr>
+<hr />
 
 <h2 id="use-window-functions-for-simple-queries">Use Window Functions for Simple Queries</h2>
 
 <ol>
-<li><p>Get the top Yelp businesses based on the number reviews in each city and the row number of the business.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT name, city, review_count, row_number()
-OVER (PARTITION BY city ORDER BY review_count DESC) AS rownum 
-FROM `business.json` LIMIT 15;  
+  <li>
+    <p>Get the top Yelp businesses based on the number reviews in each city and the row number of the business.</p>
+
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> SELECT name, city, review_count, row_number()
+ OVER (PARTITION BY city ORDER BY review_count DESC) AS rownum 
+ FROM `business.json` LIMIT 15;  
+
+ |----------------------------------------|------------|---------------|---------|
+ |                  name                  |    city    | review_count  | rownum  |
+ |----------------------------------------|------------|---------------|---------|
+ | Cupz N' Crepes                         | Ahwatukee  | 124           | 1       |
+ | My Wine Cellar                         | Ahwatukee  | 98            | 2       |
+ | Kathy's Alterations                    | Ahwatukee  | 12            | 3       |
+ | McDonald's                             | Ahwatukee  | 7             | 4       |
+ | U-Haul                                 | Ahwatukee  | 5             | 5       |
+ | Hi-Health                              | Ahwatukee  | 4             | 6       |
+ | Healthy and Clean Living Environments  | Ahwatukee  | 4             | 7       |
+ | Active Kids Pediatrics                 | Ahwatukee  | 4             | 8       |
+ | Roberto's Authentic Mexican Food       | Anthem     | 117           | 1       |
+ | Q to U BBQ                             | Anthem     | 74            | 2       |
+ | Outlets At Anthem                      | Anthem     | 64            | 3       |
+ | Dara Thai                              | Anthem     | 56            | 4       |
+ | Cafe Provence                          | Anthem     | 53            | 5       |
+ | Shanghai Club                          | Anthem     | 50            | 6       |
+ | Two Brothers Kitchen                   | Anthem     | 43            | 7       |
+ |----------------------------------------|------------|---------------|---------|
+ 15 rows selected (0.67 seconds)
+</code></pre></div>    </div>
+  </li>
+  <li>
+    <p>Check the number reviews for each business compared to the average number of reviews across all business in the city.</p>
 
-+----------------------------------------+------------+---------------+---------+
-|                  name                  |    city    | review_count  | rownum  |
-+----------------------------------------+------------+---------------+---------+
-| Cupz N&#39; Crepes                         | Ahwatukee  | 124           | 1       |
-| My Wine Cellar                         | Ahwatukee  | 98            | 2       |
-| Kathy&#39;s Alterations                    | Ahwatukee  | 12            | 3       |
-| McDonald&#39;s                             | Ahwatukee  | 7             | 4       |
-| U-Haul                                 | Ahwatukee  | 5             | 5       |
-| Hi-Health                              | Ahwatukee  | 4             | 6       |
-| Healthy and Clean Living Environments  | Ahwatukee  | 4             | 7       |
-| Active Kids Pediatrics                 | Ahwatukee  | 4             | 8       |
-| Roberto&#39;s Authentic Mexican Food       | Anthem     | 117           | 1       |
-| Q to U BBQ                             | Anthem     | 74            | 2       |
-| Outlets At Anthem                      | Anthem     | 64            | 3       |
-| Dara Thai                              | Anthem     | 56            | 4       |
-| Cafe Provence                          | Anthem     | 53            | 5       |
-| Shanghai Club                          | Anthem     | 50            | 6       |
-| Two Brothers Kitchen                   | Anthem     | 43            | 7       |
-+----------------------------------------+------------+---------------+---------+
-15 rows selected (0.67 seconds)
-</code></pre></div></li>
-<li><p>Check the number reviews for each business compared to the average number of reviews across all business in the city.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT name, city,review_count,
-Avg(review_count) OVER (PARTITION BY City) AS city_reviews_avg
-FROM `business.json` LIMIT 15;
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> SELECT name, city,review_count,
+ Avg(review_count) OVER (PARTITION BY City) AS city_reviews_avg
+ FROM `business.json` LIMIT 15;
 
-+----------------------------------------+------------+---------------+---------------------+
-|                  name                  |    city    | review_count  |  city_reviews_avg   |
-+----------------------------------------+------------+---------------+---------------------+
-| Hi-Health                              | Ahwatukee  | 4             | 32.25               |
-| My Wine Cellar                         | Ahwatukee  | 98            | 32.25               |
-| U-Haul                                 | Ahwatukee  | 5             | 32.25               |
-| Cupz N&#39; Crepes                         | Ahwatukee  | 124           | 32.25               |
-| McDonald&#39;s                             | Ahwatukee  | 7             | 32.25               |
-| Kathy&#39;s Alterations                    | Ahwatukee  | 12            | 32.25               |
-| Healthy and Clean Living Environments  | Ahwatukee  | 4             | 32.25               |
-| Active Kids Pediatrics                 | Ahwatukee  | 4             | 32.25               |
-| Anthem Community Center                | Anthem     | 4             | 14.492063492063492  |
-| Scrapbooks To Remember                 | Anthem     | 4             | 14.492063492063492  |
-| Hungry Howie&#39;s Pizza                   | Anthem     | 7             | 14.492063492063492  |
-| Pinata Nueva                           | Anthem     | 3             | 14.492063492063492  |
-| Starbucks Coffee Company               | Anthem     | 13            | 14.492063492063492  |
-| Pizza Hut                              | Anthem     | 6             | 14.492063492063492  |
-| Rays Pizza                             | Anthem     | 19            | 14.492063492063492  |
-+----------------------------------------+------------+---------------+---------------------+
-15 rows selected (0.395 seconds)
-</code></pre></div></li>
-<li><p>Check how the number of reviews for each business contribute to the total number of reviews for all businesses in the city.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT name, city,review_count,
-Sum(review_count) OVER (PARTITION BY City) AS city_reviews_sum
-FROM `business.json`limit 15;
+ |----------------------------------------|------------|---------------|---------------------|
+ |                  name                  |    city    | review_count  |  city_reviews_avg   |
+ |----------------------------------------|------------|---------------|---------------------|
+ | Hi-Health                              | Ahwatukee  | 4             | 32.25               |
+ | My Wine Cellar                         | Ahwatukee  | 98            | 32.25               |
+ | U-Haul                                 | Ahwatukee  | 5             | 32.25               |
+ | Cupz N' Crepes                         | Ahwatukee  | 124           | 32.25               |
+ | McDonald's                             | Ahwatukee  | 7             | 32.25               |
+ | Kathy's Alterations                    | Ahwatukee  | 12            | 32.25               |
+ | Healthy and Clean Living Environments  | Ahwatukee  | 4             | 32.25               |
+ | Active Kids Pediatrics                 | Ahwatukee  | 4             | 32.25               |
+ | Anthem Community Center                | Anthem     | 4             | 14.492063492063492  |
+ | Scrapbooks To Remember                 | Anthem     | 4             | 14.492063492063492  |
+ | Hungry Howie's Pizza                   | Anthem     | 7             | 14.492063492063492  |
+ | Pinata Nueva                           | Anthem     | 3             | 14.492063492063492  |
+ | Starbucks Coffee Company               | Anthem     | 13            | 14.492063492063492  |
+ | Pizza Hut                              | Anthem     | 6             | 14.492063492063492  |
+ | Rays Pizza                             | Anthem     | 19            | 14.492063492063492  |
+ |----------------------------------------|------------|---------------|---------------------|
+ 15 rows selected (0.395 seconds)
+</code></pre></div>    </div>
+  </li>
+  <li>
+    <p>Check how the number of reviews for each business contribute to the total number of reviews for all businesses in the city.</p>
+
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> SELECT name, city,review_count,
+ Sum(review_count) OVER (PARTITION BY City) AS city_reviews_sum
+ FROM `business.json`limit 15;
 
-+----------------------------------------+------------+---------------+-------------------+
-|                  name                  |    city    | review_count  | city_reviews_sum  |
-+----------------------------------------+------------+---------------+-------------------+
-| Hi-Health                              | Ahwatukee  | 4             | 258               |
-| My Wine Cellar                         | Ahwatukee  | 98            | 258               |
-| U-Haul                                 | Ahwatukee  | 5             | 258               |
-| Cupz N&#39; Crepes                         | Ahwatukee  | 124           | 258               |
-| McDonald&#39;s                             | Ahwatukee  | 7             | 258               |
-| Kathy&#39;s Alterations                    | Ahwatukee  | 12            | 258               |
-| Healthy and Clean Living Environments  | Ahwatukee  | 4             | 258               |
-| Active Kids Pediatrics                 | Ahwatukee  | 4             | 258               |
-| Anthem Community Center                | Anthem     | 4             | 913               |
-| Scrapbooks To Remember                 | Anthem     | 4             | 913               |
-| Hungry Howie&#39;s Pizza                   | Anthem     | 7             | 913               |
-| Pinata Nueva                           | Anthem     | 3             | 913               |
-| Starbucks Coffee Company               | Anthem     | 13            | 913               |
-| Pizza Hut                              | Anthem     | 6             | 913               |
-| Rays Pizza                             | Anthem     | 19            | 913               |
-+----------------------------------------+------------+---------------+-------------------+
-15 rows selected (0.543 seconds)
-</code></pre></div></li>
+ |----------------------------------------|------------|---------------|-------------------|
+ |                  name                  |    city    | review_count  | city_reviews_sum  |
+ |----------------------------------------|------------|---------------|-------------------|
+ | Hi-Health                              | Ahwatukee  | 4             | 258               |
+ | My Wine Cellar                         | Ahwatukee  | 98            | 258               |
+ | U-Haul                                 | Ahwatukee  | 5             | 258               |
+ | Cupz N' Crepes                         | Ahwatukee  | 124           | 258               |
+ | McDonald's                             | Ahwatukee  | 7             | 258               |
+ | Kathy's Alterations                    | Ahwatukee  | 12            | 258               |
+ | Healthy and Clean Living Environments  | Ahwatukee  | 4             | 258               |
+ | Active Kids Pediatrics                 | Ahwatukee  | 4             | 258               |
+ | Anthem Community Center                | Anthem     | 4             | 913               |
+ | Scrapbooks To Remember                 | Anthem     | 4             | 913               |
+ | Hungry Howie's Pizza                   | Anthem     | 7             | 913               |
+ | Pinata Nueva                           | Anthem     | 3             | 913               |
+ | Starbucks Coffee Company               | Anthem     | 13            | 913               |
+ | Pizza Hut                              | Anthem     | 6             | 913               |
+ | Rays Pizza                             | Anthem     | 19            | 913               |
+ |----------------------------------------|------------|---------------|-------------------|
+ 15 rows selected (0.543 seconds)
+</code></pre></div>    </div>
+  </li>
 </ol>
 
-<hr>
+<hr />
 
 <h2 id="use-window-functions-for-complex-queries">Use Window Functions for Complex Queries</h2>
 
 <ol>
-<li><p>List Top 10 cities and their highest ranked businesses in terms of number of reviews. Use Drill window functions such as rank, dense_rank in these queries.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">WITH X
-AS
-(SELECT name, city, review_count,
-RANK()
-OVER (PARTITION BY city
-ORDER BY review_count DESC) AS review_rank
-FROM `business.json`)
-SELECT X.name, X.city, X.review_count
-FROM X
-WHERE X.review_rank =1 ORDER BY review_count DESC LIMIT 10;
+  <li>
+    <p>List Top 10 cities and their highest ranked businesses in terms of number of reviews. Use Drill window functions such as rank, dense_rank in these queries.</p>
 
-+-------------------------------------------+-------------+---------------+
-|                   name                    |    city     | review_count  |
-+-------------------------------------------+-------------+---------------+
-| Mon Ami Gabi                              | Las Vegas   | 4084          |
-| Studio B                                  | Henderson   | 1336          |
-| Phoenix Sky Harbor International Airport  | Phoenix     | 1325          |
-| Four Peaks Brewing Co                     | Tempe       | 1110          |
-| The Mission                               | Scottsdale  | 783           |
-| Joe&#39;s Farm Grill                          | Gilbert     | 770           |
-| The Old Fashioned                         | Madison     | 619           |
-| Cornish Pasty Company                     | Mesa        | 578           |
-| SanTan Brewing Company                    | Chandler    | 469           |
-| Yard House                                | Glendale    | 321           |
-+-------------------------------------------+-------------+---------------+
-10 rows selected (0.49 seconds)
-</code></pre></div></li>
-<li><p>Compare the number of reviews for each business with the top and bottom review counts in the city.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT name, city, review_count,
-FIRST_VALUE(review_count)
-OVER(PARTITION BY city ORDER BY review_count DESC) AS top_review_count,
-LAST_VALUE(review_count)
-OVER(PARTITION BY city ORDER BY review_count DESC) AS bottom_review_count
-FROM `business.json` limit 15;
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> WITH X
+ AS
+ (SELECT name, city, review_count,
+ RANK()
+ OVER (PARTITION BY city
+ ORDER BY review_count DESC) AS review_rank
+ FROM `business.json`)
+ SELECT X.name, X.city, X.review_count
+ FROM X
+ WHERE X.review_rank =1 ORDER BY review_count DESC LIMIT 10;
+
+ |-------------------------------------------|-------------|---------------|
+ |                   name                    |    city     | review_count  |
+ |-------------------------------------------|-------------|---------------|
+ | Mon Ami Gabi                              | Las Vegas   | 4084          |
+ | Studio B                                  | Henderson   | 1336          |
+ | Phoenix Sky Harbor International Airport  | Phoenix     | 1325          |
+ | Four Peaks Brewing Co                     | Tempe       | 1110          |
+ | The Mission                               | Scottsdale  | 783           |
+ | Joe's Farm Grill                          | Gilbert     | 770           |
+ | The Old Fashioned                         | Madison     | 619           |
+ | Cornish Pasty Company                     | Mesa        | 578           |
+ | SanTan Brewing Company                    | Chandler    | 469           |
+ | Yard House                                | Glendale    | 321           |
+ |-------------------------------------------|-------------|---------------|
+ 10 rows selected (0.49 seconds)
+</code></pre></div>    </div>
+  </li>
+  <li>
+    <p>Compare the number of reviews for each business with the top and bottom review counts in the city.</p>
 
-+----------------------------------------+------------+---------------+-------------------+----------------------+
-|                  name                  |    city    | review_count  | top_review_count  | bottom_review_count  |
-+----------------------------------------+------------+---------------+-------------------+----------------------+
-| My Wine Cellar                         | Ahwatukee  | 98            | 124               | 12                   |
-| McDonald&#39;s                             | Ahwatukee  | 7             | 124               | 12                   |
-| U-Haul                                 | Ahwatukee  | 5             | 124               | 12                   |
-| Hi-Health                              | Ahwatukee  | 4             | 124               | 12                   |
-| Healthy and Clean Living Environments  | Ahwatukee  | 4             | 124               | 12                   |
-| Active Kids Pediatrics                 | Ahwatukee  | 4             | 124               | 12                   |
-| Cupz N&#39; Crepes                         | Ahwatukee  | 124           | 124               | 12                   |
-| Kathy&#39;s Alterations                    | Ahwatukee  | 12            | 124               | 12                   |
-| Q to U BBQ                             | Anthem     | 74            | 117               | 117                  |
-| Dara Thai                              | Anthem     | 56            | 117               | 117                  |
-| Cafe Provence                          | Anthem     | 53            | 117               | 117                  |
-| Shanghai Club                          | Anthem     | 50            | 117               | 117                  |
-| Two Brothers Kitchen                   | Anthem     | 43            | 117               | 117                  |
-| The Tennessee Grill                    | Anthem     | 32            | 117               | 117                  |
-| Dollyrockers Boutique and Salon        | Anthem     | 30            | 117               | 117                  |
-+----------------------------------------+------------+---------------+-------------------+----------------------+
-15 rows selected (0.516 seconds)
-</code></pre></div></li>
-<li><p>Compare the number of reviews with the number of reviews for the previous and following businesses.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">SELECT city, review_count, name,
-LAG(review_count, 1) OVER(PARTITION BY city ORDER BY review_count DESC) 
-AS preceding_count,
-LEAD(review_count, 1) OVER(PARTITION BY city ORDER BY review_count DESC) 
-AS following_count
-FROM `business.json` limit 15;
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> SELECT name, city, review_count,
+ FIRST_VALUE(review_count)
+ OVER(PARTITION BY city ORDER BY review_count DESC) AS top_review_count,
+ LAST_VALUE(review_count)
+ OVER(PARTITION BY city ORDER BY review_count DESC) AS bottom_review_count
+ FROM `business.json` limit 15;
 
-+------------+---------------+----------------------------------------+------------------+------------------+
-|    city    | review_count  |                  name                  | preceding_count  | following_count  |
-+------------+---------------+----------------------------------------+------------------+------------------+
-| Ahwatukee  | 124           | Cupz N&#39; Crepes                         | null             | 98               |
-| Ahwatukee  | 98            | My Wine Cellar                         | 124              | 12               |
-| Ahwatukee  | 12            | Kathy&#39;s Alterations                    | 98               | 7                |
-| Ahwatukee  | 7             | McDonald&#39;s                             | 12               | 5                |
-| Ahwatukee  | 5             | U-Haul                                 | 7                | 4                |
-| Ahwatukee  | 4             | Hi-Health                              | 5                | 4                |
-| Ahwatukee  | 4             | Healthy and Clean Living Environments  | 4                | 4                |
-| Ahwatukee  | 4             | Active Kids Pediatrics                 | 4                | null             |
-| Anthem     | 117           | Roberto&#39;s Authentic Mexican Food       | null             | 74               |
-| Anthem     | 74            | Q to U BBQ                             | 117              | 64               |
-| Anthem     | 64            | Outlets At Anthem                      | 74               | 56               |
-| Anthem     | 56            | Dara Thai                              | 64               | 53               |
-| Anthem     | 53            | Cafe Provence                          | 56               | 50               |
-| Anthem     | 50            | Shanghai Club                          | 53               | 43               |
-| Anthem     | 43            | Two Brothers Kitchen                   | 50               | 32               |
-+------------+---------------+----------------------------------------+------------------+------------------+
-15 rows selected (0.518 seconds)
-</code></pre></div></li>
+ |----------------------------------------|------------|---------------|-------------------|----------------------|
+ |                  name                  |    city    | review_count  | top_review_count  | bottom_review_count  |
+ |----------------------------------------|------------|---------------|-------------------|----------------------|
+ | My Wine Cellar                         | Ahwatukee  | 98            | 124               | 12                   |
+ | McDonald's                             | Ahwatukee  | 7             | 124               | 12                   |
+ | U-Haul                                 | Ahwatukee  | 5             | 124               | 12                   |
+ | Hi-Health                              | Ahwatukee  | 4             | 124               | 12                   |
+ | Healthy and Clean Living Environments  | Ahwatukee  | 4             | 124               | 12                   |
+ | Active Kids Pediatrics                 | Ahwatukee  | 4             | 124               | 12                   |
+ | Cupz N' Crepes                         | Ahwatukee  | 124           | 124               | 12                   |
+ | Kathy's Alterations                    | Ahwatukee  | 12            | 124               | 12                   |
+ | Q to U BBQ                             | Anthem     | 74            | 117               | 117                  |
+ | Dara Thai                              | Anthem     | 56            | 117               | 117                  |
+ | Cafe Provence                          | Anthem     | 53            | 117               | 117                  |
+ | Shanghai Club                          | Anthem     | 50            | 117               | 117                  |
+ | Two Brothers Kitchen                   | Anthem     | 43            | 117               | 117                  |
+ | The Tennessee Grill                    | Anthem     | 32            | 117               | 117                  |
+ | Dollyrockers Boutique and Salon        | Anthem     | 30            | 117               | 117                  |
+ |----------------------------------------|------------|---------------|-------------------|----------------------|
+ 15 rows selected (0.516 seconds)
+</code></pre></div>    </div>
+  </li>
+  <li>
+    <p>Compare the number of reviews with the number of reviews for the previous and following businesses.</p>
+
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> SELECT city, review_count, name,
+ LAG(review_count, 1) OVER(PARTITION BY city ORDER BY review_count DESC) 
+ AS preceding_count,
+ LEAD(review_count, 1) OVER(PARTITION BY city ORDER BY review_count DESC) 
+ AS following_count
+ FROM `business.json` limit 15;
+
+ |------------|---------------|----------------------------------------|------------------|------------------|
+ |    city    | review_count  |                  name                  | preceding_count  | following_count  |
+ |------------|---------------|----------------------------------------|------------------|------------------|
+ | Ahwatukee  | 124           | Cupz N' Crepes                         | null             | 98               |
+ | Ahwatukee  | 98            | My Wine Cellar                         | 124              | 12               |
+ | Ahwatukee  | 12            | Kathy's Alterations                    | 98               | 7                |
+ | Ahwatukee  | 7             | McDonald's                             | 12               | 5                |
+ | Ahwatukee  | 5             | U-Haul                                 | 7                | 4                |
+ | Ahwatukee  | 4             | Hi-Health                              | 5                | 4                |
+ | Ahwatukee  | 4             | Healthy and Clean Living Environments  | 4                | 4                |
+ | Ahwatukee  | 4             | Active Kids Pediatrics                 | 4                | null             |
+ | Anthem     | 117           | Roberto's Authentic Mexican Food       | null             | 74               |
+ | Anthem     | 74            | Q to U BBQ                             | 117              | 64               |
+ | Anthem     | 64            | Outlets At Anthem                      | 74               | 56               |
+ | Anthem     | 56            | Dara Thai                              | 64               | 53               |
+ | Anthem     | 53            | Cafe Provence                          | 56               | 50               |
+ | Anthem     | 50            | Shanghai Club                          | 53               | 43               |
+ | Anthem     | 43            | Two Brothers Kitchen                   | 50               | 32               |
+ |------------|---------------|----------------------------------------|------------------|------------------|
+ 15 rows selected (0.518 seconds)
+</code></pre></div>    </div>
+  </li>
 </ol>
 
     
@@ -1593,7 +1656,7 @@ FROM `business.json` limit 15;
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/analyzing-highly-dynamic-datasets/index.html b/docs/analyzing-highly-dynamic-datasets/index.html
index cc9fe11..5e1effc 100644
--- a/docs/analyzing-highly-dynamic-datasets/index.html
+++ b/docs/analyzing-highly-dynamic-datasets/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,149 +1367,170 @@
 
     </div>
 
-     Dec 26, 2019
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
-        <p>Today’s data is dynamic and application-driven. The growth of a new era of business applications driven by industry trends such as web, social, mobile, and Internet of Things are generating datasets with new data types and new data models. These applications are iterative, and the associated data models typically are semi-structured, schema-less and constantly evolving. Semi-structured data models can be complex/nested, schema-less, and capable of having varying fields in ever [...]
+        <p>Today’s data is dynamic and application-driven. The growth of a new era of business applications driven by industry trends such as web, social, mobile, and Internet of Things are generating datasets with new data types and new data models. These applications are iterative, and the associated data models typically are semi-structured, schema-less and constantly evolving. Semi-structured data models can be complex/nested, schema-less, and capable of having varying fields in ever [...]
 
 <p>This tutorial shows you how to natively query dynamic datasets, such as JSON, and derive insights from any type of data in minutes. The dataset used in the example is from the Yelp check-ins dataset, which has the following structure:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">check-in
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>check-in
 {
-    &#39;type&#39;: &#39;checkin&#39;,
-    &#39;business_id&#39;: (encrypted business id),
-    &#39;checkin_info&#39;: {
-        &#39;0-0&#39;: (number of checkins from 00:00 to 01:00 on all Sundays),
-        &#39;1-0&#39;: (number of checkins from 01:00 to 02:00 on all Sundays),
+    'type': 'checkin',
+    'business_id': (encrypted business id),
+    'checkin_info': {
+        '0-0': (number of checkins from 00:00 to 01:00 on all Sundays),
+        '1-0': (number of checkins from 01:00 to 02:00 on all Sundays),
         ...
-        &#39;14-4&#39;: (number of checkins from 14:00 to 15:00 on all Thursdays),
+        '14-4': (number of checkins from 14:00 to 15:00 on all Thursdays),
         ...
-        &#39;23-6&#39;: (number of checkins from 23:00 to 00:00 on all Saturdays)
+        '23-6': (number of checkins from 23:00 to 00:00 on all Saturdays)
     }, # if there was no checkin for a hour-day block it will not be in the dataset
 }
-</code></pre></div>
+</code></pre></div></div>
+
 <p>It is worth repeating the comment at the bottom of this snippet:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">   If there was no checkin for a hour-day block it will not be in the dataset. 
-</code></pre></div>
-<p>The element names that you see in the <code>checkin_info</code> are unknown upfront and can vary for every row. The data, although simple, is highly dynamic data. To analyze the data there is no need to first represent this dataset in a flattened relational structure, as you would using any other SQL on Hadoop technology.</p>
 
-<hr>
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>   If there was no checkin for a hour-day block it will not be in the dataset. 
+</code></pre></div></div>
+
+<p>The element names that you see in the <code class="language-plaintext highlighter-rouge">checkin_info</code> are unknown upfront and can vary for every row. The data, although simple, is highly dynamic data. To analyze the data there is no need to first represent this dataset in a flattened relational structure, as you would using any other SQL on Hadoop technology.</p>
+
+<hr />
 
 <p>Step 1: First download Drill, if you have not yet done so, onto your machine</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">http://drill.apache.org/download/
-tar -xvf apache-drill-1.17.0.tar
-</code></pre></div>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>http://drill.apache.org/download/
+tar -xvf apache-drill-1.18.0.tar
+</code></pre></div></div>
+
 <p>Install Drill locally on your desktop (embedded mode). You don’t need Hadoop.</p>
 
-<hr>
+<hr />
 
 <p>Step 2: Start the Drill shell.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">bin/drill-embedded
-</code></pre></div>
-<hr>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>bin/drill-embedded
+</code></pre></div></div>
+
+<hr />
 
 <p>Step 3: Start analyzing the data using SQL</p>
 
 <p>First, let’s take a look at the dataset:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT * FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` limit 2;
-+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------+------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT * FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` limit 2;
+|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|------------------------|
 |                                                                 checkin_info                                                                                                                                                             |    type    |      business_id       |
-+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------+------------------------+
-| {&quot;3-4&quot;:1,&quot;13-5&quot;:1,&quot;6-6&quot;:1,&quot;14-5&quot;:1,&quot;14-6&quot;:1,&quot;14-2&quot;:1,&quot;14-3&quot;:1,&quot;19-0&quot;:1,&quot;11-5&quot;:1,&quot;13-2&quot;:1,&quot;11-6&quot;:2,&quot;11-3&quot;:1,&quot;12-6&quot;:1,&quot;6-5&quot;:1,&quot;5-5&quot;:1,&quot;9-2&quot;:1,&quot;9-5&quot;:1,&quot;9-6&quot;:1,&quot;5-2&quot;:1,&quot;7-6&quot;:1,&quot;7-5&quot;:1,&quot;7-4&quot;:1,&quot;17-5&quot;:1,&quot;8-5&quot;:1,&quot;10-2&quot;:1,&quot;10-5&quot;:1,&quot;1 [...]
-| {&quot;6-6&quot;:2,&quot;6-5&quot;:1,&quot;7-6&quot;:1,&quot;7-5&quot;:1,&quot;8-5&quot;:2,&quot;10-5&quot;:1,&quot;9-3&quot;:1,&quot;12-5&quot;:1,&quot;15-3&quot;:1,&quot;15-5&quot;:1,&quot;15-6&quot;:1,&quot;16-3&quot;:1,&quot;10-0&quot;:1,&quot;15-4&quot;:1,&quot;10-4&quot;:1,&quot;8-2&quot;:1}                                                                                               | checkin    | uGykseHzyS5xAMWoN6YUqA |
-+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------+------------------------+
-</code></pre></div>
+|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|------------------------|
+| {"3-4":1,"13-5":1,"6-6":1,"14-5":1,"14-6":1,"14-2":1,"14-3":1,"19-0":1,"11-5":1,"13-2":1,"11-6":2,"11-3":1,"12-6":1,"6-5":1,"5-5":1,"9-2":1,"9-5":1,"9-6":1,"5-2":1,"7-6":1,"7-5":1,"7-4":1,"17-5":1,"8-5":1,"10-2":1,"10-5":1,"10-6":1} | checkin    | JwUE5GmEO-sH1FuwJgKBlQ |
+| {"6-6":2,"6-5":1,"7-6":1,"7-5":1,"8-5":2,"10-5":1,"9-3":1,"12-5":1,"15-3":1,"15-5":1,"15-6":1,"16-3":1,"10-0":1,"15-4":1,"10-4":1,"8-2":1}                                                                                               | checkin    | uGykseHzyS5xAMWoN6YUqA |
+|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|------------------------|
+</code></pre></div></div>
+
 <div class="admonition note">
   <p class="first admonition-title">Note</p>
   <p class="last">This document aligns Drill output for example purposes. Drill output is not aligned in this case.  </p>
 </div>
 
-<p>You query the data in JSON files directly. Schema definitions in Hive store are not necessary. The names of the elements within the <code>checkin_info</code> column are different between the first and second row.</p>
+<p>You query the data in JSON files directly. Schema definitions in Hive store are not necessary. The names of the elements within the <code class="language-plaintext highlighter-rouge">checkin_info</code> column are different between the first and second row.</p>
 
 <p>Drill provides a function called KVGEN (Key Value Generator) which is useful when working with complex data that contains arbitrary maps consisting of dynamic and unknown element names such as checkin_info. KVGEN turns the dynamic map into an array of key-value pairs where keys represent the dynamic element names.</p>
 
-<p>Let’s apply KVGEN on the <code>checkin_info</code> element to generate key-value pairs.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT KVGEN(checkin_info) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` LIMIT 2;
-+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
-|                                                                    checkins                                                                                                                                                                                                                                                                                                                                                                                                                                [...]
-+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
-| [{&quot;key&quot;:&quot;3-4&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;13-5&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;6-6&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;14-5&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;14-6&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;14-2&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;14-3&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;19-0&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;11-5&quot;,&quot;value&quot; [...]
-| [{&quot;key&quot;:&quot;6-6&quot;,&quot;value&quot;:2},{&quot;key&quot;:&quot;6-5&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;7-6&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;7-5&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;8-5&quot;,&quot;value&quot;:2},{&quot;key&quot;:&quot;10-5&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;9-3&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;12-5&quot;,&quot;value&quot;:1},{&quot;key&quot;:&quot;15-3&quot;,&quot;value&quot;:1}, [...]
-+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
-</code></pre></div>
+<p>Let’s apply KVGEN on the <code class="language-plaintext highlighter-rouge">checkin_info</code> element to generate key-value pairs.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT KVGEN(checkin_info) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` LIMIT 2;
+|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
+| checkins                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                   [...]
+|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
+| [{"key":"3-4","value":1},{"key":"13-5","value":1},{"key":"6-6","value":1},{"key":"14-5","value":1},{"key":"14-6","value":1},{"key":"14-2","value":1},{"key":"14-3","value":1},{"key":"19-0","value":1},{"key":"11-5","value":1},{"key":"13-2","value":1},{"key":"11-6","value":2},{"key":"11-3","value":1},{"key":"12-6","value":1},{"key":"6-5","value":1},{"key":"5-5","value":1},{"key":"9-2","value":1},{"key":"9-5","value":1},{"key":"9-6","value":1},{"key":"5-2","value":1},{"key":"7-6","value":1 [...]
+| [{"key":"6-6","value":2},{"key":"6-5","value":1},{"key":"7-6","value":1},{"key":"7-5","value":1},{"key":"8-5","value":2},{"key":"10-5","value":1},{"key":"9-3","value":1},{"key":"12-5","value":1},{"key":"15-3","value":1},{"key":"15-5","value":1},{"key":"15-6","value":1},{"key":"16-3","value":1},{"key":"10-0","value":1},{"key":"15-4","value":1},{"key":"10-4","value":1},{"key":"8-2","value":1}]                                                                                                 [...]
+|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [...]
+</code></pre></div></div>
+
 <p>Drill provides another function to operate on complex data called ‘Flatten’ to break the list of key-value pairs resulting from ‘KVGen’ into separate rows to further apply analytic functions on it.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` LIMIT 20;
-+--------------------------+
-|         checkins         |
-+--------------------------+
-| {&quot;key&quot;:&quot;3-4&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;13-5&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;6-6&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;14-5&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;14-6&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;14-2&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;14-3&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;19-0&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;11-5&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;13-2&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;11-6&quot;,&quot;value&quot;:2} |
-| {&quot;key&quot;:&quot;11-3&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;12-6&quot;,&quot;value&quot;:1} |
-| {&quot;key&quot;:&quot;6-5&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;5-5&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;9-2&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;9-5&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;9-6&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;5-2&quot;,&quot;value&quot;:1}  |
-| {&quot;key&quot;:&quot;7-6&quot;,&quot;value&quot;:1}  |
-+--------------------------+
-</code></pre></div>
-<p>You can get value from the data quickly by applying both KVGEN and FLATTEN functions on the datasets on the fly--no need for time-consuming schema definitions and data storage in intermediate formats.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` LIMIT 20;
+|--------------------------|
+| checkins                 |
+|--------------------------|
+| {"key":"3-4","value":1}  |
+| {"key":"13-5","value":1} |
+| {"key":"6-6","value":1}  |
+| {"key":"14-5","value":1} |
+| {"key":"14-6","value":1} |
+| {"key":"14-2","value":1} |
+| {"key":"14-3","value":1} |
+| {"key":"19-0","value":1} |
+| {"key":"11-5","value":1} |
+| {"key":"13-2","value":1} |
+| {"key":"11-6","value":2} |
+| {"key":"11-3","value":1} |
+| {"key":"12-6","value":1} |
+| {"key":"6-5","value":1}  |
+| {"key":"5-5","value":1}  |
+| {"key":"9-2","value":1}  |
+| {"key":"9-5","value":1}  |
+| {"key":"9-6","value":1}  |
+| {"key":"5-2","value":1}  |
+| {"key":"7-6","value":1}  |
+|--------------------------|
+</code></pre></div></div>
+
+<p>You can get value from the data quickly by applying both KVGEN and FLATTEN functions on the datasets on the fly–no need for time-consuming schema definitions and data storage in intermediate formats.</p>
 
 <p>On the output of flattened data, you use standard SQL functionality such as filters , aggregates, and sort. Let’s see a few examples.</p>
 
 <p><strong>Get the total number of check-ins recorded in the Yelp dataset</strong></p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT SUM(checkintbl.checkins.`value`) AS TotalCheckins FROM (
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT SUM(checkintbl.checkins.`value`) AS TotalCheckins FROM (
 . . . . . . . . . . . &gt;  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl
 . . . . . . . . . . . &gt;  ;
-+---------------+
+|---------------|
 | TotalCheckins |
-+---------------+
+|---------------|
 | 4713811       |
-+---------------+
-</code></pre></div>
+|---------------|
+</code></pre></div></div>
+
 <p><strong>Get the number of check-ins specifically for Sunday midnights</strong></p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT SUM(checkintbl.checkins.`value`) AS SundayMidnightCheckins FROM (
-. . . . . . . . . . . &gt;  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl WHERE checkintbl.checkins.key=&#39;23-0&#39;;
-+------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT SUM(checkintbl.checkins.`value`) AS SundayMidnightCheckins FROM (
+. . . . . . . . . . . &gt;  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl WHERE checkintbl.checkins.key='23-0';
+|------------------------|
 | SundayMidnightCheckins |
-+------------------------+
+|------------------------|
 | 8575                   |
-+------------------------+
-</code></pre></div>
-<p><strong>Get the number of check-ins per day of the week</strong>  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT `right`(checkintbl.checkins.key,1) WeekDay,sum(checkintbl.checkins.`value`) TotalCheckins from (
+|------------------------|
+</code></pre></div></div>
+
+<p><strong>Get the number of check-ins per day of the week</strong></p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT `right`(checkintbl.checkins.key,1) WeekDay,sum(checkintbl.checkins.`value`) TotalCheckins from (
 . . . . . . . . . . . &gt;  select flatten(kvgen(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json`  ) checkintbl GROUP BY `right`(checkintbl.checkins.key,1) ORDER BY TotalCheckins;
-+------------+---------------+
-|  WeekDay   | TotalCheckins |
-+------------+---------------+
-| 1          | 545626        |
-| 0          | 555038        |
-| 2          | 555747        |
-| 3          | 596296        |
-| 6          | 735830        |
-| 4          | 788073        |
-| 5          | 937201        |
-+------------+---------------+
-</code></pre></div>
+|---------|---------------|
+| WeekDay | TotalCheckins |
+|---------|---------------|
+| 1       | 545626        |
+| 0       | 555038        |
+| 2       | 555747        |
+| 3       | 596296        |
+| 6       | 735830        |
+| 4       | 788073        |
+| 5       | 937201        |
+|---------|---------------|
+</code></pre></div></div>
+
 <p><strong>Get the number of check-ins per hour of the day</strong></p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; SELECT SUBSTR(checkintbl.checkins.key,1,strpos(checkintbl.checkins.key,&#39;-&#39;)-1) AS HourOfTheDay ,SUM(checkintbl.checkins.`value`) TotalCheckins FROM (
-. . . . . . . . . . . &gt;  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl GROUP BY SUBSTR(checkintbl.checkins.key,1,strpos(checkintbl.checkins.key,&#39;-&#39;)-1) ORDER BY TotalCheckins;
-+--------------+---------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; SELECT SUBSTR(checkintbl.checkins.key,1,strpos(checkintbl.checkins.key,'-')-1) AS HourOfTheDay ,SUM(checkintbl.checkins.`value`) TotalCheckins FROM (
+. . . . . . . . . . . &gt;  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl GROUP BY SUBSTR(checkintbl.checkins.key,1,strpos(checkintbl.checkins.key,'-')-1) ORDER BY TotalCheckins;
+|--------------|---------------|
 | HourOfTheDay | TotalCheckins |
-+--------------+---------------+
+|--------------|---------------|
 | 3            | 20357         |
 | 4            | 21076         |
 | 2            | 28116         |
@@ -1502,12 +1555,12 @@ tar -xvf apache-drill-1.17.0.tar
 | 19           | 385381        |
 | 12           | 399797        |
 | 18           | 422022        |
-+--------------+---------------+
-</code></pre></div>
-<hr>
+|--------------|---------------|
+</code></pre></div></div>
 
-<h2 id="summary">Summary</h2>
+<hr />
 
+<h2 id="summary">Summary</h2>
 <p>In this tutorial, you surf both structured and semi-structured data without any upfront schema management or ETL.</p>
 
     
@@ -1526,7 +1579,7 @@ tar -xvf apache-drill-1.17.0.tar
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/analyzing-social-media/index.html b/docs/analyzing-social-media/index.html
index e7f9d3f..29c5e52 100644
--- a/docs/analyzing-social-media/index.html
+++ b/docs/analyzing-social-media/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,9 +1367,9 @@
 
     </div>
 
-     Nov 2, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1346,10 +1378,10 @@
 <h2 id="social-media-analysis-prerequisites">Social Media Analysis Prerequisites</h2>
 
 <ul>
-<li>Twitter developer account</li>
-<li>AWS account</li>
-<li>A MapR node on AWS</li>
-<li>A MicroStrategy AWS instance</li>
+  <li>Twitter developer account</li>
+  <li>AWS account</li>
+  <li>A MapR node on AWS</li>
+  <li>A MicroStrategy AWS instance</li>
 </ul>
 
 <h2 id="configuring-the-aws-environment">Configuring the AWS environment</h2>
@@ -1357,79 +1389,85 @@
 <p>Configuring the environment on Amazon Web Services (AWS) consists of these tasks:</p>
 
 <ul>
-<li>Create a Twitter Dev account and register a Twitter application<br></li>
-<li>Provision a preconfigured AWS MapR node with Flume and Drill<br></li>
-<li>Provision a MicroStrategy AWS instance<br></li>
-<li>Configure MicroStrategy to run reports and analyses using Drill<br></li>
-<li>Create a Twitter Dev account and register an application</li>
+  <li>Create a Twitter Dev account and register a Twitter application</li>
+  <li>Provision a preconfigured AWS MapR node with Flume and Drill</li>
+  <li>Provision a MicroStrategy AWS instance</li>
+  <li>Configure MicroStrategy to run reports and analyses using Drill</li>
+  <li>Create a Twitter Dev account and register an application</li>
 </ul>
 
 <p>This tutorial assumes you are familiar with MicroStrategy. For information about using MicroStrategy, see the <a href="http://www.microstrategy.com/Strategy/media/downloads/products/cloud/cloud_aws-user-guide.pdf">MicroStrategy documentation</a>.</p>
 
-<hr>
+<hr />
 
 <h2 id="establishing-a-twitter-feed-and-flume-credentials">Establishing a Twitter Feed and Flume Credentials</h2>
 
 <p>The following steps establish a Twitter feed and get Twitter credentials required by Flume to set up Twitter as a data source:</p>
 
 <ol>
-<li>Go to dev.twitter.com and sign in with your Twitter account details.<br></li>
-<li>Click <strong>Manage Your Apps</strong> under Tools in the page footer.<br></li>
-<li>Click <strong>Create New App</strong> and fill in the form, then create the application.</li>
-<li>On the <strong>Keys and Access Tokens</strong> tab, create an access token, and then click <strong>Create My Access Token</strong>. If you have read-only access, you can create the token.</li>
-<li>Copy the following credentials for the Twitter App that will be used to configure Flume: 
-
-<ul>
-<li>Consumer Key</li>
-<li>Consumer Secret</li>
-<li>Access Token</li>
-<li>Access Token Secret</li>
-</ul></li>
+  <li>Go to dev.twitter.com and sign in with your Twitter account details.</li>
+  <li>Click <strong>Manage Your Apps</strong> under Tools in the page footer.</li>
+  <li>Click <strong>Create New App</strong> and fill in the form, then create the application.</li>
+  <li>On the <strong>Keys and Access Tokens</strong> tab, create an access token, and then click <strong>Create My Access Token</strong>. If you have read-only access, you can create the token.</li>
+  <li>Copy the following credentials for the Twitter App that will be used to configure Flume:
+    <ul>
+      <li>Consumer Key</li>
+      <li>Consumer Secret</li>
+      <li>Access Token</li>
+      <li>Access Token Secret</li>
+    </ul>
+  </li>
 </ol>
 
-<hr>
+<hr />
 
 <h2 id="provision-preconfigured-mapr-node-on-aws">Provision Preconfigured MapR Node on AWS</h2>
 
 <p>You need to provision a preconfigured MapR node on AWS named ami-4dedc47d. The AMI is already configured with Flume, Drill, and specific elements to support data streaming from Twitter and Drill query views. The AMI is publicly available under Community AMIs, has a 6GB root drive, and a 100GB data drive. Being a small node, very large volumes of data will significantly decrease the response time to Twitter data queries.</p>
 
 <ol>
-<li>In AWS, launch an instance.<br>
-The AMI image is preconfigured to use a m2.2xlarge instance type with 4 vCPUs and 32GB of memory.<br></li>
-<li>Select the AMI id ami-4dedc47d.<br></li>
-<li>Make sure that the instance has been assigned an external IP address; an Elastic IP is preferred, but not essential.<br></li>
-<li>Verify that a security group is used with open TCP and UDP ports on the node. At this time, all ports are left open on the node.</li>
-<li>After provisioning and booting up the instance, reboot the node in the AWS EC2 management interface to finalize the configuration.</li>
+  <li>In AWS, launch an instance.<br />
+The AMI image is preconfigured to use a m2.2xlarge instance type with 4 vCPUs and 32GB of memory.</li>
+  <li>Select the AMI id ami-4dedc47d.</li>
+  <li>Make sure that the instance has been assigned an external IP address; an Elastic IP is preferred, but not essential.</li>
+  <li>Verify that a security group is used with open TCP and UDP ports on the node. At this time, all ports are left open on the node.</li>
+  <li>After provisioning and booting up the instance, reboot the node in the AWS EC2 management interface to finalize the configuration.</li>
 </ol>
 
 <p>The node is now configured with the required Flume and Drill installation. Next, update the Flume configuration files with the required credentials and keywords.</p>
 
-<hr>
+<hr />
 
 <h2 id="update-flume-configuration-files">Update Flume Configuration Files</h2>
 
 <ol>
-<li>Log in as the ec2-user using the AWS credentials.</li>
-<li>Switch to the mapr user on the node using <code>su – mapr.</code></li>
-<li>Update the Flume configuration files <code>flume-env.sh</code> and <code>flume</code>.conf in the <code>&lt;FLUME HOME&gt;/conf</code> directory using the Twitter app credentials from the first section. See the <a href="https://github.com/mapr/mapr-demos/tree/master/drill-twitter-MSTR/flume">sample files</a>.</li>
-<li>Enter the desired keywords, separated by a comma.<br>
-Separate multiple keywords using a space.<br></li>
-<li>Filter tweets for specific languages, if needed, by entering the ISO 639-1 <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes">language codes</a> separated by a comma. If you need no language filtering, leave the parameter blank.<br></li>
-<li>Go to the FLUME HOME directory and, as user <code>mapr</code>, type screen on the command line as user <code>mapr</code>:<br></li>
-<li><p>Start Flume by typing the following command:  </p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">./bin/flume-ng agent --conf ./conf/ -f ./conf/flume.conf -Dflume.root.logger=INFO,console -n TwitterAgent
-</code></pre></div></li>
-<li><p>Enter <code>CTRL+a</code> to exit, followed by <code>d</code> to detach.<br>
-To go back to the screen terminal, simply enter screen –r to reattach.<br>
-Twitter data streams into the system.  </p></li>
-<li><p>Run the following command to verify volumes:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text"> du –h /mapr/drill_demo/twitter/feed.
-</code></pre></div></li>
+  <li>Log in as the ec2-user using the AWS credentials.</li>
+  <li>Switch to the mapr user on the node using <code class="language-plaintext highlighter-rouge">su – mapr.</code></li>
+  <li>Update the Flume configuration files <code class="language-plaintext highlighter-rouge">flume-env.sh</code> and <code class="language-plaintext highlighter-rouge">flume</code>.conf in the <code class="language-plaintext highlighter-rouge">&lt;FLUME HOME&gt;/conf</code> directory using the Twitter app credentials from the first section. See the <a href="https://github.com/mapr/mapr-demos/tree/master/drill-twitter-MSTR/flume">sample files</a>.</li>
+  <li>Enter the desired keywords, separated by a comma.<br />
+Separate multiple keywords using a space.</li>
+  <li>Filter tweets for specific languages, if needed, by entering the ISO 639-1 <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes">language codes</a> separated by a comma. If you need no language filtering, leave the parameter blank.</li>
+  <li>Go to the FLUME HOME directory and, as user <code class="language-plaintext highlighter-rouge">mapr</code>, type screen on the command line as user <code class="language-plaintext highlighter-rouge">mapr</code>:</li>
+  <li>
+    <p>Start Flume by typing the following command:</p>
+
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> ./bin/flume-ng agent --conf ./conf/ -f ./conf/flume.conf -Dflume.root.logger=INFO,console -n TwitterAgent
+</code></pre></div>    </div>
+  </li>
+  <li>Enter <code class="language-plaintext highlighter-rouge">CTRL+a</code> to exit, followed by <code class="language-plaintext highlighter-rouge">d</code> to detach.<br />
+To go back to the screen terminal, simply enter screen –r to reattach.<br />
+Twitter data streams into the system.</li>
+  <li>
+    <p>Run the following command to verify volumes:</p>
+
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>  du –h /mapr/drill_demo/twitter/feed.
+</code></pre></div>    </div>
+  </li>
 </ol>
 
-<p>You cannot run queries until data appears in the feed directory. Allow 20-30 minutes minimum. </p>
+<p>You cannot run queries until data appears in the feed directory. Allow 20-30 minutes minimum.</p>
 
-<hr>
+<hr />
 
 <h2 id="provision-a-microstrategy-aws-instance">Provision a MicroStrategy AWS Instance</h2>
 
@@ -1438,136 +1476,137 @@ Twitter data streams into the system.  </p></li>
 <p>To provision the MicroStrategy node in AWS:</p>
 
 <ol>
-<li>On the <a href="http://www.microstrategy.com/us/analytics/analytics-on-aws">MicroStrategy website</a>, click <strong>Get started</strong>.<br></li>
-<li>Select some number of users, for example, select 25 users.<br></li>
-<li>Select the AWS region. Using a MapR node and MicroStrategy instance in the same AWS region is highly recommended.</li>
-<li>Click <strong>Continue</strong>.<br></li>
-<li>On the Manual Launch tab, click <strong>Launch with EC2 Console</strong> next to the appropriate region, and select <strong>r3.large instance</strong>.<br>
-An EC2 instance of r3.large is sufficient for the 25 user version.<br></li>
-<li>Click <strong>Configure Instance Details</strong>.</li>
-<li>Select an appropriate network setting and zones, ideally within the same zone and network as the MapR node that you provisioned.
-<div class="admonition important">
-<p class="first admonition-title">Important</p>
-<p class="last">Make sure that the MicroStrategy instance has a Public IP; elastic IP is preferred but not essential.  </p>
-</div></li>
-<li>Keep the default storage.</li>
-<li>Assign a tag to identify the instance.</li>
-<li>Select a security group that allows sufficient access to external IPs and open all ports because security is not a concern. </li>
-<li>In the AWS Console, launch an instance, and when the AWS reports that the instance is running, select it, and click <strong>Connect</strong>.</li>
-<li>Click <strong>Get Password</strong> to get the OS Administrator password.</li>
+  <li>On the <a href="http://www.microstrategy.com/us/analytics/analytics-on-aws">MicroStrategy website</a>, click <strong>Get started</strong>.</li>
+  <li>Select some number of users, for example, select 25 users.</li>
+  <li>Select the AWS region. Using a MapR node and MicroStrategy instance in the same AWS region is highly recommended.</li>
+  <li>Click <strong>Continue</strong>.</li>
+  <li>On the Manual Launch tab, click <strong>Launch with EC2 Console</strong> next to the appropriate region, and select <strong>r3.large instance</strong>.<br />
+An EC2 instance of r3.large is sufficient for the 25 user version.</li>
+  <li>Click <strong>Configure Instance Details</strong>.</li>
+  <li>Select an appropriate network setting and zones, ideally within the same zone and network as the MapR node that you provisioned.</li>
+  <li>Keep the default storage.</li>
+  <li>Assign a tag to identify the instance.</li>
+  <li>Select a security group that allows sufficient access to external IPs and open all ports because security is not a concern.</li>
+  <li>In the AWS Console, launch an instance, and when the AWS reports that the instance is running, select it, and click <strong>Connect</strong>.</li>
+  <li>Click <strong>Get Password</strong> to get the OS Administrator password.</li>
 </ol>
 
+<div class="admonition important">
+  <p class="first admonition-title">Important</p>
+  <p class="last">Make sure that the MicroStrategy instance has a Public IP; elastic IP is preferred but not essential.  </p>
+</div>
+
 <p>The instance is now accessible with RDP and is using the relevant AWS credentials and security.</p>
 
-<hr>
+<hr />
 
 <h2 id="configure-microstrategy">Configure MicroStrategy</h2>
 
 <p>You need to configure MicroStrategy to integrate with Drill using the ODBC driver. You install a MicroStrategy package with a number of useful, prebuilt reports for working with Twitter data. You can modify the reports or use the reports as a template to create new and more interesting reports and analysis models.</p>
 
 <ol>
-<li>Configure a System DSN named <code>Twitter</code> with the ODBC administrator. The quick start version of the MapR ODBC driver requires the DSN.<br></li>
-<li><a href="http://package.mapr.com/tools/MapR-ODBC/MapR_Drill/MapRDrill_odbc_v0.08.1.0618/MapRDrillODBC32.msi">Download the quick start version of the MapR ODBC driver for Drill</a>.<br></li>
-<li><a href="http://drill.apache.org/docs/using-microstrategy-analytics-with-apache-drill">Configure the ODBC driver</a> for Drill on MicroStrategy Analytics.<br>
-The Drill object is part of the package and doesn’t need to be configured.<br></li>
-<li>Use the AWS Private IP if both the MapR node and the MicroStrategy instance are located in the same region (recommended).</li>
-<li>Download the <a href="https://github.com/mapr/mapr-demos/blob/master/drill-twitter-MSTR/MSTR/DrillTwitterProjectPackage.mmp">Drill and Twitter configuration</a> package for MicroStrategy on the Windows system using Git for Windows or the full GitHub for Windows.</li>
+  <li>Configure a System DSN named <code class="language-plaintext highlighter-rouge">Twitter</code> with the ODBC administrator. The quick start version of the MapR ODBC driver requires the DSN.</li>
+  <li><a href="http://package.mapr.com/tools/MapR-ODBC/MapR_Drill/MapRDrill_odbc_v0.08.1.0618/MapRDrillODBC32.msi">Download the quick start version of the MapR ODBC driver for Drill</a>.</li>
+  <li><a href="http://drill.apache.org/docs/using-microstrategy-analytics-with-apache-drill">Configure the ODBC driver</a> for Drill on MicroStrategy Analytics.<br />
+ The Drill object is part of the package and doesn’t need to be configured.</li>
+  <li>Use the AWS Private IP if both the MapR node and the MicroStrategy instance are located in the same region (recommended).</li>
+  <li>Download the <a href="https://github.com/mapr/mapr-demos/blob/master/drill-twitter-MSTR/MSTR/DrillTwitterProjectPackage.mmp">Drill and Twitter configuration</a> package for MicroStrategy on the Windows system using Git for Windows or the full GitHub for Windows.</li>
 </ol>
 
-<hr>
+<hr />
 
 <h2 id="import-reports">Import Reports</h2>
 
 <ol>
-<li>In MicroStrategy Developer, select <strong>Schema &gt; Create New Project</strong> to create a new project with MicroStrategy Developer.<br></li>
-<li>Click <strong>Create Project</strong> and type a name for the new project.<br></li>
-<li>Click <strong>OK</strong>.<br>
-The Project appears in MicroStrategy Developer.<br></li>
-<li>Open MicroStrategy Object Manager.<br></li>
-<li>Connect to the Project Source and login as Administrator.<br>
-<img src="/docs/img/socialmed1.png" alt="project sources"></li>
-<li>In MicroStrategy Object Manager, MicroStrategy Analytics Modules, select the project for the package. For example, select <strong>Twitter analysis Apache Drill</strong>.<br>
-<img src="/docs/img/socialmed2.png" alt="project sources"></li>
-<li>Select <strong>Tools &gt; Import Configuration Package</strong>.<br></li>
-<li>Open the configuration package file, and click <strong>Proceed</strong>.<br>
-<img src="/docs/img/socialmed3.png" alt="project sources">
-The package with the reports is available in MicroStrategy.<br></li>
+  <li>In MicroStrategy Developer, select <strong>Schema &gt; Create New Project</strong> to create a new project with MicroStrategy Developer.</li>
+  <li>Click <strong>Create Project</strong> and type a name for the new project.</li>
+  <li>Click <strong>OK</strong>.<br />
+The Project appears in MicroStrategy Developer.</li>
+  <li>Open MicroStrategy Object Manager.</li>
+  <li>Connect to the Project Source and login as Administrator.<br />
+<img src="/images/docs/socialmed1.png" alt="project sources" /></li>
+  <li>In MicroStrategy Object Manager, MicroStrategy Analytics Modules, select the project for the package. For example, select <strong>Twitter analysis Apache Drill</strong>.<br />
+<img src="/images/docs/socialmed2.png" alt="project sources" /></li>
+  <li>Select <strong>Tools &gt; Import Configuration Package</strong>.</li>
+  <li>Open the configuration package file, and click <strong>Proceed</strong>.<br />
+<img src="/images/docs/socialmed3.png" alt="project sources" />
+The package with the reports is available in MicroStrategy.</li>
 </ol>
 
 <p>You can test and modify the reports in MicroStrategy Developer. Configure permissions if necessary.</p>
 
-<hr>
+<hr />
 
 <h2 id="update-the-schema">Update the Schema</h2>
 
 <ol>
-<li>In MicroStrategy Developer, select <strong>Schema &gt; Update Schema</strong>.<br></li>
-<li>In Schema Update, select all check boxes, and click <strong>Update</strong>.<br>
-<img src="/docs/img/socialmed4.png" alt="project sources"></li>
+  <li>In MicroStrategy Developer, select <strong>Schema &gt; Update Schema</strong>.</li>
+  <li>In Schema Update, select all check boxes, and click <strong>Update</strong>.<br />
+<img src="/images/docs/socialmed4.png" alt="project sources" /></li>
 </ol>
 
-<hr>
+<hr />
 
 <h2 id="create-a-user-and-set-the-password">Create a User and Set the Password</h2>
 
 <ol>
-<li>Expand Administration.<br></li>
-<li>Expand User Manager, and click <strong>Everyone</strong>.<br></li>
-<li>Right-click to create a new user, or click <strong>Administrator</strong> to edit the password.<br></li>
+  <li>Expand Administration.</li>
+  <li>Expand User Manager, and click <strong>Everyone</strong>.</li>
+  <li>Right-click to create a new user, or click <strong>Administrator</strong> to edit the password.</li>
 </ol>
 
-<hr>
+<hr />
 
 <h2 id="about-the-reports">About the Reports</h2>
 
 <p>There are 18 reports in the package. Most reports prompt you to specify date ranges, output limits, and terms as needed. The package contains reports in three main categories:</p>
 
 <ul>
-<li>Volumes: A number of reports that show the total volume of Tweets by different date and time designations.</li>
-<li>Top List: Displays the top Tweets, Retweets, hashtags and users are displayed.</li>
-<li>Specific Terms: Tweets and Retweets that can be measured or listed based on terms in the text of the Tweet itself.</li>
+  <li>Volumes: A number of reports that show the total volume of Tweets by different date and time designations.</li>
+  <li>Top List: Displays the top Tweets, Retweets, hashtags and users are displayed.</li>
+  <li>Specific Terms: Tweets and Retweets that can be measured or listed based on terms in the text of the Tweet itself.</li>
 </ul>
 
-<p>You can copy and modify the reports or use the reports as a template for querying Twitter data using Drill. </p>
+<p>You can copy and modify the reports or use the reports as a template for querying Twitter data using Drill.</p>
 
 <p>You can access reports through MicroStrategy Developer or the web interface. MicroStrategy Developer provides a more powerful interface than the web interface to modify reports or add new reports, but requires RDP access to the node.</p>
 
-<hr>
+<hr />
 
 <h2 id="using-the-web-interface">Using the Web Interface</h2>
 
 <ol>
-<li>Using a web browser, enter the URL for the web interface:<br>
-     http://<MSTR node name or IP address>/MicroStrategy/asp/Main.aspx</li>
-<li>Log in as the User you created or as Administrator, using the credentials created initially with Developer.<br></li>
-<li>On the Welcome MicroStrategy Web User page, choose the project that was used to load the analysis package: <strong>Drill Twitter Analysis</strong>.<br>
-<img src="/docs/img/socialmed5.png" alt="choose project"></li>
-<li>Select <strong>Shared Reports</strong>.<br>
+  <li>Using a web browser, enter the URL for the web interface:<br />
+      http://<MSTR node="" name="" or="" IP="" address="">/MicroStrategy/asp/Main.aspx</MSTR></li>
+  <li>Log in as the User you created or as Administrator, using the credentials created initially with Developer.</li>
+  <li>On the Welcome MicroStrategy Web User page, choose the project that was used to load the analysis package: <strong>Drill Twitter Analysis</strong>.<br />
+<img src="/images/docs/socialmed5.png" alt="choose project" /></li>
+  <li>Select <strong>Shared Reports</strong>.<br />
 The folders with the three main categories of the reports appear.
-<img src="/docs/img/socialmed6.png" alt="project sources"></li>
-<li>Select a report, and respond to any prompts. For example, to run the Top Tweet Languages by Date Range, enter the required Date_Start and Date_End.<br>
-<img src="/docs/img/socialmed7.png" alt="project sources"></li>
-<li>Click <strong>Run Report</strong>.<br>
+<img src="/images/docs/socialmed6.png" alt="project sources" /></li>
+  <li>Select a report, and respond to any prompts. For example, to run the Top Tweet Languages by Date Range, enter the required Date_Start and Date_End.<br />
+<img src="/images/docs/socialmed7.png" alt="project sources" /></li>
+  <li>Click <strong>Run Report</strong>.<br />
 A histogram report appears showing the top tweet languages by date range.
-<img src="/docs/img/socialmed8.png" alt="project sources"></li>
-<li>To refresh the data or re-enter prompt values, select <strong>Data &gt; Refresh</strong> or <strong>Data &gt; Re-prompt</strong>.</li>
+<img src="/images/docs/socialmed8.png" alt="project sources" /></li>
+  <li>To refresh the data or re-enter prompt values, select <strong>Data &gt; Refresh</strong> or <strong>Data &gt; Re-prompt</strong>.</li>
 </ol>
 
 <h2 id="browsing-the-apache-drill-twitter-analysis-reports">Browsing the Apache Drill Twitter Analysis Reports</h2>
 
-<p>The MicroStrategy Developer reports are located in the Public Objects folder of the project you chose for installing the package.<br>
-   <img src="/docs/img/socialmed9.png" alt="project sources">
-Many of the reports require you to respond to prompts to select the desired data. For example, select the Top Hashtags report in the right-hand column. This report requires you to respond to prompts for a Start Date and End Date to specify the date range for data of interest; by default, data for the last two months, ending with the current date is selected. You can also specify the limit for the number of Top Hashtags to be returned; the default is the top 10 hashtags.<br>
-   <img src="/docs/img/socialmed10.png" alt="project sources">
-When you click <strong>Finish</strong> a bar chart report with the hashtag and number of times it appeared in the specified data range appears.<br>
-   <img src="/docs/img/socialmed11.png" alt="project sources"></p>
+<p>The MicroStrategy Developer reports are located in the Public Objects folder of the project you chose for installing the package.<br />
+   <img src="/images/docs/socialmed9.png" alt="project sources" />
+Many of the reports require you to respond to prompts to select the desired data. For example, select the Top Hashtags report in the right-hand column. This report requires you to respond to prompts for a Start Date and End Date to specify the date range for data of interest; by default, data for the last two months, ending with the current date is selected. You can also specify the limit for the number of Top Hashtags to be returned; the default is the top 10 hashtags.<br />
+   <img src="/images/docs/socialmed10.png" alt="project sources" />
+When you click <strong>Finish</strong> a bar chart report with the hashtag and number of times it appeared in the specified data range appears.<br />
+   <img src="/images/docs/socialmed11.png" alt="project sources" /></p>
 
 <p>Other reports are available in the bundle. For example, this report shows total tweets by hour:
-   <img src="/docs/img/socialmed12.png" alt="tweets by hour">
-This report shows top Retweets for a date range with original Tweet date and count in the date range.<br>
-   <img src="/docs/img/socialmed13.png" alt="retweets report"></p>
+   <img src="/images/docs/socialmed12.png" alt="tweets by hour" />
+This report shows top Retweets for a date range with original Tweet date and count in the date range.<br />
+   <img src="/images/docs/socialmed13.png" alt="retweets report" /></p>
 
-<hr>
+<hr />
 
 <h2 id="summary">Summary</h2>
 
@@ -1589,7 +1628,7 @@ This report shows top Retweets for a date range with original Tweet date and cou
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/analyzing-the-yelp-academic-dataset/index.html b/docs/analyzing-the-yelp-academic-dataset/index.html
index 3a69f4e..c0f7e7e 100644
--- a/docs/analyzing-the-yelp-academic-dataset/index.html
+++ b/docs/analyzing-the-yelp-academic-dataset/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul style="display: none">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,9 +1367,9 @@
 
     </div>
 
-     Nov 2, 2018
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1355,7 +1387,7 @@ against the Yelp data set. The publicly available data set used for this
 example is downloadable from <a href="http://www.yelp.com/dataset_challenge">Yelp</a>
 (business reviews) and is in JSON format.</p>
 
-<hr>
+<hr />
 
 <h2 id="installing-and-starting-drill">Installing and Starting Drill</h2>
 
@@ -1374,23 +1406,25 @@ analysis extremely easy.</p>
   <p class="last">You need to substitute your local path to the Yelp data set in the angle-bracketed portion of the FROM clause of each query you run.  </p>
 </div>
 
-<hr>
+<hr />
 
 <h2 id="querying-data-with-drill">Querying Data with Drill</h2>
 
 <h3 id="1-view-the-contents-of-the-yelp-business-data">1. View the contents of the Yelp business data</h3>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; !set maxwidth 10000
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; !set maxwidth 10000
 
 0: jdbc:drill:zk=local&gt; select * from
     dfs.`&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json`
     limit 1;
 
-+------------------------+----------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------+--------------------------------+---------+--------------+-------------------+-------------+-------+-------+-----------+--------------------------------- [...]
+|------------------------|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|--------------------------------|---------|--------------|-------------------|-------------|-------|-------|-----------|--------------------------------- [...]
 | business_id            | full_address                                       | hours                                                                                                                                                                                                                                                      | open | categories                     | city    | review_count | name              | longitude   | state | stars | latitude  | attributes                       [...]
-+------------------------+----------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------+--------------------------------+---------+--------------+-------------------+-------------+-------+-------+-----------+--------------------------------- [...]
-| vcNAWiLM4dR7D2nwwJ7nCA | 4840 E Indian School Rd Ste 101, Phoenix, AZ 85018 | fill in{&quot;Tuesday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Friday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Monday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Wednesday&quot;:{&quot;close&quot;:&quot;17:00&quot;,&quot;open&quot;:&quot;08:00&quot;},&quot;Thursday&quot;:{&quot;clos [...]
-+------------------------+----------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------+--------------------------------+---------+--------------+-------------------+-------------+-------+-------+-----------+--------------------------------- [...]
-</code></pre></div>
+|------------------------|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|--------------------------------|---------|--------------|-------------------|-------------|-------|-------|-----------|--------------------------------- [...]
+| vcNAWiLM4dR7D2nwwJ7nCA | 4840 E Indian School Rd Ste 101, Phoenix, AZ 85018 | fill in{"Tuesday":{"close":"17:00","open":"08:00"},"Friday":{"close":"17:00","open":"08:00"},"Monday":{"close":"17:00","open":"08:00"},"Wednesday":{"close":"17:00","open":"08:00"},"Thursday":{"close":"17:00","open":"08:00"},"Sunday":{},"Saturday":{}} | true | ["Doctors","Health &amp; Medical"] | Phoenix | 7            | Eric Goldberg, MD | -111.983758 | AZ    | 3.5   | 33.499313 | {"By Appointment Only":true, [...]
+|------------------------|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|--------------------------------|---------|--------------|-------------------|-------------|-------|-------|-----------|--------------------------------- [...]
+</code></pre></div></div>
+
 <div class="admonition note">
   <p class="first admonition-title">Note</p>
   <p class="last">This document aligns Drill output for example purposes. Drill output is not aligned in this case.  </p>
@@ -1401,23 +1435,26 @@ analysis extremely easy.</p>
 <h3 id="2-explore-the-business-data-set-further">2. Explore the business data set further</h3>
 
 <h4 id="total-reviews-in-the-data-set">Total reviews in the data set</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select sum(review_count) as totalreviews 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select sum(review_count) as totalreviews 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json`;
 
-+--------------+
+|--------------|
 | totalreviews |
-+--------------+
+|--------------|
 | 1236445      |
-+--------------+
-</code></pre></div>
+|--------------|
+</code></pre></div></div>
+
 <h4 id="top-states-and-cities-in-total-number-of-reviews">Top states and cities in total number of reviews</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select state, city, count(*) totalreviews 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select state, city, count(*) totalreviews 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` 
 group by state, city order by count(*) desc limit 10;
 
-+------------+------------+--------------+
+|------------|------------|--------------|
 |   state    |    city    | totalreviews |
-+------------+------------+--------------+
+|------------|------------|--------------|
 | NV         | Las Vegas  | 12021        |
 | AZ         | Phoenix    | 7499         |
 | AZ         | Scottsdale | 3605         |
@@ -1428,16 +1465,18 @@ group by state, city order by count(*) desc limit 10;
 | AZ         | Chandler   | 1637         |
 | WI         | Madison    | 1630         |
 | AZ         | Glendale   | 1196         |
-+------------+------------+--------------+
-</code></pre></div>
+|------------|------------|--------------|
+</code></pre></div></div>
+
 <h4 id="average-number-of-reviews-per-business-star-rating">Average number of reviews per business star rating</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select stars,trunc(avg(review_count)) reviewsavg 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select stars,trunc(avg(review_count)) reviewsavg 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json`
 group by stars order by stars desc;
 
-+------------+------------+
+|------------|------------|
 |   stars    | reviewsavg |
-+------------+------------+
+|------------|------------|
 | 5.0        | 8.0        |
 | 4.5        | 28.0       |
 | 4.0        | 48.0       |
@@ -1447,16 +1486,18 @@ group by stars order by stars desc;
 | 2.0        | 11.0       |
 | 1.5        | 9.0        |
 | 1.0        | 4.0        |
-+------------+------------+
-</code></pre></div>
-<h4 id="top-businesses-with-high-review-counts-1000">Top businesses with high review counts (&gt; 1000)</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select name, state, city, `review_count` from
+|------------|------------|
+</code></pre></div></div>
+
+<h4 id="top-businesses-with-high-review-counts--1000">Top businesses with high review counts (&gt; 1000)</h4>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select name, state, city, `review_count` from
 dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json`
 where review_count &gt; 1000 order by `review_count` desc limit 10;
 
-+-------------------------------+-------------+------------+---------------+
+|-------------------------------|-------------|------------|---------------|
 |           name                |   state     |    city    |  review_count |
-+-------------------------------+-------------+------------+---------------+
+|-------------------------------|-------------|------------|---------------|
 | Mon Ami Gabi                  | NV          | Las Vegas  | 4084          |
 | Earl of Sandwich              | NV          | Las Vegas  | 3655          |
 | Wicked Spoon                  | NV          | Las Vegas  | 3408          |
@@ -1467,30 +1508,33 @@ where review_count &gt; 1000 order by `review_count` desc limit 10;
 | Bacchanal Buffet              | NV          | Las Vegas  | 2369          |
 | The Cosmopolitan of Las Vegas | NV          | Las Vegas  | 2253          |
 | Aria Hotel &amp; Casino           | NV          | Las Vegas  | 2224          |
-+-------------------------------+-------------+----------------------------+
-</code></pre></div>
+|-------------------------------|-------------|----------------------------|
+</code></pre></div></div>
+
 <h4 id="saturday-open-and-close-times-for-a-few-businesses">Saturday open and close times for a few businesses</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select b.name, b.hours.Saturday.`open`,
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select b.name, b.hours.Saturday.`open`,
 b.hours.Saturday.`close`  
 from
 dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json`
 b limit 10;
 
-+----------------------------+------------+------------+
+|----------------------------|------------|------------|
 |    name                    |   EXPR$1   |   EXPR$2   |
-+----------------------------+------------+------------+
+|----------------------------|------------|------------|
 | Eric Goldberg, MD          | 08:00      | 17:00      |
 | Pine Cone Restaurant       | null       | null       |
 | Deforest Family Restaurant | 06:00      | 22:00      |
-| Culver&#39;s                   | 10:30      | 22:00      |
+| Culver's                   | 10:30      | 22:00      |
 | Chang Jiang Chinese Kitchen| 11:00      | 22:00      |
 | Charter Communications     | null       | null       |
 | Air Quality Systems        | null       | null       |
 | McFarland Public Library   | 09:00      | 20:00      |
 | Green Lantern Restaurant   | 06:00      | 02:00      |
 | Spartan Animal Hospital    | 07:30      | 18:00      |
-+----------------------------+------------+------------+
-</code></pre></div>
+|----------------------------|------------|------------|
+</code></pre></div></div>
+
 <p>Note how Drill can traverse and refer through multiple levels of nesting.</p>
 
 <h3 id="3-get-the-amenities-of-each-business-in-the-data-set">3. Get the amenities of each business in the data set</h3>
@@ -1502,31 +1546,35 @@ schemas.</p>
 
 <p>First, change Drill to work in all text mode (so we can take a look at all of
 the data).</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; alter system set `store.json.all_text_mode` = true;
-+------------+-----------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; alter system set `store.json.all_text_mode` = true;
+|------------|-----------------------------------|
 |     ok     |  summary                          |
-+------------+-----------------------------------+
+|------------|-----------------------------------|
 | true       | store.json.all_text_mode updated. |
-+------------+-----------------------------------+
-</code></pre></div>
+|------------|-----------------------------------|
+</code></pre></div></div>
+
 <p>Then, query the attribute’s data.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select attributes from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` limit 10;
 
-+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select attributes from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` limit 10;
+
+|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 |                                                     attributes                                                                                                                    |
-+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| {&quot;By Appointment Only&quot;:&quot;true&quot;,&quot;Good For&quot;:{},&quot;Ambience&quot;:{},&quot;Parking&quot;:{},&quot;Music&quot;:{},&quot;Hair Types Specialized In&quot;:{},&quot;Payment Types&quot;:{},&quot;Dietary Restrictions&quot;:{}}                    |
-| {&quot;Take-out&quot;:&quot;true&quot;,&quot;Good For&quot;:{&quot;dessert&quot;:&quot;false&quot;,&quot;latenight&quot;:&quot;false&quot;,&quot;lunch&quot;:&quot;true&quot;,&quot;dinner&quot;:&quot;false&quot;,&quot;breakfast&quot;:&quot;false&quot;,&quot;brunch&quot;:&quot;false&quot;},&quot;Caters&quot;:&quot;false&quot;,&quot;Noise Level&quot;:&quot;averag |
-| {&quot;Take-out&quot;:&quot;true&quot;,&quot;Good For&quot;:{&quot;dessert&quot;:&quot;false&quot;,&quot;latenight&quot;:&quot;false&quot;,&quot;lunch&quot;:&quot;false&quot;,&quot;dinner&quot;:&quot;false&quot;,&quot;breakfast&quot;:&quot;false&quot;,&quot;brunch&quot;:&quot;true&quot;},&quot;Caters&quot;:&quot;false&quot;,&quot;Noise Level&quot;:&quot;quiet&quot; |
-| {&quot;Take-out&quot;:&quot;true&quot;,&quot;Good For&quot;:{},&quot;Takes Reservations&quot;:&quot;false&quot;,&quot;Delivery&quot;:&quot;false&quot;,&quot;Ambience&quot;:{},&quot;Parking&quot;:{&quot;garage&quot;:&quot;false&quot;,&quot;street&quot;:&quot;false&quot;,&quot;validated&quot;:&quot;false&quot;,&quot;lot&quot;:&quot;true&quot;,&quot;val |
-| {&quot;Take-out&quot;:&quot;true&quot;,&quot;Good For&quot;:{},&quot;Ambience&quot;:{},&quot;Parking&quot;:{},&quot;Has TV&quot;:&quot;false&quot;,&quot;Outdoor Seating&quot;:&quot;false&quot;,&quot;Attire&quot;:&quot;casual&quot;,&quot;Music&quot;:{},&quot;Hair Types Specialized In&quot;:{},&quot;Payment Types |
-| {&quot;Good For&quot;:{},&quot;Ambience&quot;:{},&quot;Parking&quot;:{},&quot;Music&quot;:{},&quot;Hair Types Specialized In&quot;:{},&quot;Payment Types&quot;:{},&quot;Dietary Restrictions&quot;:{}}                                                 |
-| {&quot;Good For&quot;:{},&quot;Ambience&quot;:{},&quot;Parking&quot;:{},&quot;Music&quot;:{},&quot;Hair Types Specialized In&quot;:{},&quot;Payment Types&quot;:{},&quot;Dietary Restrictions&quot;:{}}                                                 |
-| {&quot;Good For&quot;:{},&quot;Ambience&quot;:{},&quot;Parking&quot;:{},&quot;Wi-Fi&quot;:&quot;free&quot;,&quot;Music&quot;:{},&quot;Hair Types Specialized In&quot;:{},&quot;Payment Types&quot;:{},&quot;Dietary Restrictions&quot;:{}}                                  |
-| {&quot;Take-out&quot;:&quot;true&quot;,&quot;Good For&quot;:{&quot;dessert&quot;:&quot;false&quot;,&quot;latenight&quot;:&quot;false&quot;,&quot;lunch&quot;:&quot;false&quot;,&quot;dinner&quot;:&quot;true&quot;,&quot;breakfast&quot;:&quot;false&quot;,&quot;brunch&quot;:&quot;false&quot;},&quot;Noise Level&quot;:&quot;average&quot;                |
-| {&quot;Good For&quot;:{},&quot;Ambience&quot;:{},&quot;Parking&quot;:{},&quot;Music&quot;:{},&quot;Hair Types Specialized In&quot;:{},&quot;Payment Types&quot;:{},&quot;Dietary Restrictions&quot;:{}}                                                 |
-+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-</code></pre></div>
+|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| {"By Appointment Only":"true","Good For":{},"Ambience":{},"Parking":{},"Music":{},"Hair Types Specialized In":{},"Payment Types":{},"Dietary Restrictions":{}}                    |
+| {"Take-out":"true","Good For":{"dessert":"false","latenight":"false","lunch":"true","dinner":"false","breakfast":"false","brunch":"false"},"Caters":"false","Noise Level":"averag |
+| {"Take-out":"true","Good For":{"dessert":"false","latenight":"false","lunch":"false","dinner":"false","breakfast":"false","brunch":"true"},"Caters":"false","Noise Level":"quiet" |
+| {"Take-out":"true","Good For":{},"Takes Reservations":"false","Delivery":"false","Ambience":{},"Parking":{"garage":"false","street":"false","validated":"false","lot":"true","val |
+| {"Take-out":"true","Good For":{},"Ambience":{},"Parking":{},"Has TV":"false","Outdoor Seating":"false","Attire":"casual","Music":{},"Hair Types Specialized In":{},"Payment Types |
+| {"Good For":{},"Ambience":{},"Parking":{},"Music":{},"Hair Types Specialized In":{},"Payment Types":{},"Dietary Restrictions":{}}                                                 |
+| {"Good For":{},"Ambience":{},"Parking":{},"Music":{},"Hair Types Specialized In":{},"Payment Types":{},"Dietary Restrictions":{}}                                                 |
+| {"Good For":{},"Ambience":{},"Parking":{},"Wi-Fi":"free","Music":{},"Hair Types Specialized In":{},"Payment Types":{},"Dietary Restrictions":{}}                                  |
+| {"Take-out":"true","Good For":{"dessert":"false","latenight":"false","lunch":"false","dinner":"true","breakfast":"false","brunch":"false"},"Noise Level":"average"                |
+| {"Good For":{},"Ambience":{},"Parking":{},"Music":{},"Hair Types Specialized In":{},"Payment Types":{},"Dietary Restrictions":{}}                                                 |
+|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+</code></pre></div></div>
+
 <div class="admonition note">
   <p class="first admonition-title">Note</p>
   <p class="last">This document aligns Drill output for example purposes. Drill output is not aligned in this case.  </p>
@@ -1534,29 +1582,34 @@ the data).</p>
 
 <p>Turn off the all text mode so we can continue to perform arithmetic operations
 on data.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; alter system set `store.json.all_text_mode` = false;
-+-------+------------------------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; alter system set `store.json.all_text_mode` = false;
+|-------|------------------------------------|
 |  ok   |              summary               |
-+-------+------------------------------------+
+|-------|------------------------------------|
 | true  | store.json.all_text_mode updated.  |
-+-------+------------------------------------+
-</code></pre></div>
+|-------|------------------------------------|
+</code></pre></div></div>
+
 <h3 id="4-explore-the-restaurant-businesses-in-the-data-set">4. Explore the restaurant businesses in the data set</h3>
 
 <h4 id="number-of-restaurants-in-the-data-set">Number of restaurants in the data set</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select count(*) as TotalRestaurants from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` where true=repeated_contains(categories,&#39;Restaurants&#39;);
-+------------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select count(*) as TotalRestaurants from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` where true=repeated_contains(categories,'Restaurants');
+|------------------|
 | TotalRestaurants |
-+------------------+
+|------------------|
 | 14303            |
-+------------------+
-</code></pre></div>
+|------------------|
+</code></pre></div></div>
+
 <h4 id="top-restaurants-in-number-of-reviews">Top restaurants in number of reviews</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select name,state,city,`review_count` from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` where true=repeated_contains(categories,&#39;Restaurants&#39;) order by `review_count` desc limit 10;
 
-+------------------------+-------+-----------+--------------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select name,state,city,`review_count` from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` where true=repeated_contains(categories,'Restaurants') order by `review_count` desc limit 10;
+
+|------------------------|-------|-----------|--------------|
 |          name          | state |    city   | review_count |
-+------------------------+-------+-----------+--------------+
+|------------------------|-------|-----------|--------------|
 | Mon Ami Gabi           | NV    | Las Vegas | 4084         |
 | Earl of Sandwich       | NV    | Las Vegas | 3655         |
 | Wicked Spoon           | NV    | Las Vegas | 3408         |
@@ -1567,40 +1620,44 @@ on data.</p>
 | Bacchanal Buffet       | NV    | Las Vegas | 2369         |
 | Hash House A Go Go     | NV    | Las Vegas | 2201         |
 | Mesa Grill             | NV    | Las Vegas | 2004         |
-+------------------------+-------+-----------+--------------+
-</code></pre></div>
+|------------------------|-------|-----------|--------------|
+</code></pre></div></div>
+
 <h4 id="top-restaurants-in-number-of-listed-categories">Top restaurants in number of listed categories</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select name,repeated_count(categories) as categorycount, categories from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` where true=repeated_contains(categories,&#39;Restaurants&#39;) order by repeated_count(categories) desc limit 10;
 
-+---------------------------------+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select name,repeated_count(categories) as categorycount, categories from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` where true=repeated_contains(categories,'Restaurants') order by repeated_count(categories) desc limit 10;
+
+|---------------------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
 | name                            | categorycount | categories                                                                                                                                        |
-+---------------------------------+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
-| Binion&#39;s Hotel &amp; Casino         | 10            | [&quot;Arts &amp;,Entertainment&quot;,&quot;Restaurants&quot;,&quot;Bars&quot;,&quot;Casinos&quot;,&quot;Event,Planning &amp;,Services&quot;,&quot;Lounges&quot;,&quot;Nightlife&quot;,&quot;Hotels &amp;,Travel&quot;,&quot;American]             |
-| Stage Deli                      | 10            | [&quot;Arts &amp;,Entertainment&quot;,&quot;Food&quot;,&quot;Hotels&quot;,&quot;Desserts&quot;,&quot;Delis&quot;,&quot;Casinos&quot;,&quot;Sandwiches&quot;,&quot;Hotels,&amp; Travel&quot;,&quot;Restaurants&quot;,&quot;Event Planning &amp;,Services&quot;]    |
-| Jillian&#39;s                       | 9             | [&quot;Arts &amp;,Entertainment&quot;,&quot;American (Traditional)&quot;,&quot;Music,Venues&quot;,&quot;Bars&quot;,&quot;Dance,Clubs&quot;,&quot;Nightlife&quot;,&quot;Bowling&quot;,&quot;Active,Life&quot;,&quot;Restaurants&quot;]           |
-| Hotel Chocolat                  | 9             | [&quot;Coffee &amp;,Tea&quot;,&quot;Food&quot;,&quot;Cafes&quot;,&quot;Chocolatiers &amp;,Shops&quot;,&quot;Specialty Food&quot;,&quot;Event Planning &amp;,Services&quot;,&quot;Hotels &amp; Travel&quot;,&quot;Hotels&quot;,&quot;Restaurants&quot;]      |
-| Hotel du Vin &amp; Bistro Edinburgh | 9             | [&quot;Modern,European&quot;,&quot;Bars&quot;,&quot;French&quot;,&quot;Wine,Bars&quot;,&quot;Event Planning &amp;,Services&quot;,&quot;Nightlife&quot;,&quot;Hotels &amp;,Travel&quot;,&quot;Hotels&quot;,&quot;Restaurants&quot;]                  |
-| Elixir                          | 9             | [&quot;Arts &amp;,Entertainment&quot;,&quot;American (Traditional)&quot;,&quot;Music,Venues&quot;,&quot;Bars&quot;,&quot;Cocktail,Bars&quot;,&quot;Nightlife&quot;,&quot;American (New)&quot;,&quot;Local,Flavor&quot;,&quot;Restaurants&quot;] |
-| Tocasierra Spa and Fitness      | 8             | [&quot;Beauty &amp;,Spas&quot;,&quot;Gyms&quot;,&quot;Medical Spas&quot;,&quot;Health &amp;,Medical&quot;,&quot;Fitness &amp; Instruction&quot;,&quot;Active,Life&quot;,&quot;Day Spas&quot;,&quot;Restaurants&quot;]                         |
-| Costa Del Sol At Sunset Station | 8             | [&quot;Steakhouses&quot;,&quot;Mexican&quot;,&quot;Seafood&quot;,&quot;Event,Planning &amp; Services&quot;,&quot;Hotels &amp;,Travel&quot;,&quot;Italian&quot;,&quot;Restaurants&quot;,&quot;Hotels&quot;]                                |
-| Scottsdale Silverado Golf Club  | 8             | [&quot;Fashion&quot;,&quot;Shopping&quot;,&quot;Sporting,Goods&quot;,&quot;Active Life&quot;,&quot;Golf&quot;,&quot;American,(New)&quot;,&quot;Sports Wear&quot;,&quot;Restaurants&quot;]                                         |
-| House of Blues                  | 8             | [&quot;Arts &amp; Entertainment&quot;,&quot;Music Venues&quot;,&quot;Restaurants&quot;,&quot;Hotels&quot;,&quot;Event Planning &amp; Services&quot;,&quot;Hotels &amp; Travel&quot;,&quot;American (New)&quot;,&quot;Nightlife&quot;]         |
-+---------------------------------+---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
-</code></pre></div>
+|---------------------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
+| Binion's Hotel &amp; Casino         | 10            | ["Arts &amp;,Entertainment","Restaurants","Bars","Casinos","Event,Planning &amp;,Services","Lounges","Nightlife","Hotels &amp;,Travel","American]             |
+| Stage Deli                      | 10            | ["Arts &amp;,Entertainment","Food","Hotels","Desserts","Delis","Casinos","Sandwiches","Hotels,&amp; Travel","Restaurants","Event Planning &amp;,Services"]    |
+| Jillian's                       | 9             | ["Arts &amp;,Entertainment","American (Traditional)","Music,Venues","Bars","Dance,Clubs","Nightlife","Bowling","Active,Life","Restaurants"]           |
+| Hotel Chocolat                  | 9             | ["Coffee &amp;,Tea","Food","Cafes","Chocolatiers &amp;,Shops","Specialty Food","Event Planning &amp;,Services","Hotels &amp; Travel","Hotels","Restaurants"]      |
+| Hotel du Vin &amp; Bistro Edinburgh | 9             | ["Modern,European","Bars","French","Wine,Bars","Event Planning &amp;,Services","Nightlife","Hotels &amp;,Travel","Hotels","Restaurants"]                  |
+| Elixir                          | 9             | ["Arts &amp;,Entertainment","American (Traditional)","Music,Venues","Bars","Cocktail,Bars","Nightlife","American (New)","Local,Flavor","Restaurants"] |
+| Tocasierra Spa and Fitness      | 8             | ["Beauty &amp;,Spas","Gyms","Medical Spas","Health &amp;,Medical","Fitness &amp; Instruction","Active,Life","Day Spas","Restaurants"]                         |
+| Costa Del Sol At Sunset Station | 8             | ["Steakhouses","Mexican","Seafood","Event,Planning &amp; Services","Hotels &amp;,Travel","Italian","Restaurants","Hotels"]                                |
+| Scottsdale Silverado Golf Club  | 8             | ["Fashion","Shopping","Sporting,Goods","Active Life","Golf","American,(New)","Sports Wear","Restaurants"]                                         |
+| House of Blues                  | 8             | ["Arts &amp; Entertainment","Music Venues","Restaurants","Hotels","Event Planning &amp; Services","Hotels &amp; Travel","American (New)","Nightlife"]         |
+|---------------------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
+</code></pre></div></div>
+
 <div class="admonition note">
   <p class="first admonition-title">Note</p>
   <p class="last">This document aligns Drill output for example purposes. Drill output is not aligned in this case.  </p>
 </div>
 
 <h4 id="top-first-categories-in-number-of-review-counts">Top first categories in number of review counts</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select categories[0], count(categories[0]) as categorycount 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select categories[0], count(categories[0]) as categorycount 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp_academic_dataset_business.json` 
 group by categories[0] 
 order by count(categories[0]) desc limit 10;
 
-+----------------------+---------------+
+|----------------------|---------------|
 | EXPR$0               | categorycount |
-+----------------------+---------------+
+|----------------------|---------------|
 | Food                 | 4294          |
 | Shopping             | 1885          |
 | Active Life          | 1676          |
@@ -1611,39 +1668,44 @@ order by count(categories[0]) desc limit 10;
 | Fast Food            | 963           |
 | Arts &amp; Entertainment | 906           |
 | Hair Salons          | 901           |
-+----------------------+---------------+
-</code></pre></div>
+|----------------------|---------------|
+</code></pre></div></div>
+
 <h3 id="5-explore-the-yelp-reviews-dataset-and-combine-with-the-businesses">5. Explore the Yelp reviews dataset and combine with the businesses.</h3>
 
 <h4 id="take-a-look-at-the-contents-of-the-yelp-reviews-dataset">Take a look at the contents of the Yelp reviews dataset.</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select * 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select * 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_review.json` limit 1;
-+---------------------------------+------------------------+------------------------+-------+------------+----------------------------------------------------------------------+--------+------------------------+
+|---------------------------------|------------------------|------------------------|-------|------------|----------------------------------------------------------------------|--------|------------------------|
 | votes                           | user_id                | review_id              | stars | date       | text                                                                 | type   | business_id            |
-+---------------------------------+------------------------+------------------------+-------+------------+----------------------------------------------------------------------+--------+------------------------+
-| {&quot;funny&quot;:0,&quot;useful&quot;:2,&quot;cool&quot;:1} | Xqd0DzHaiyRqVH3WRG7hzg | 15SdjuK7DmYqUAj6rjGowg | 5     | 2007-05-17 | dr. goldberg offers everything i look for in a general practitioner. | review | vcNAWiLM4dR7D2nwwJ7nCA |
-+---------------------------------+------------------------+------------------------+-------+------------+----------------------------------------------------------------------+--------+------------------------+
-</code></pre></div>
+|---------------------------------|------------------------|------------------------|-------|------------|----------------------------------------------------------------------|--------|------------------------|
+| {"funny":0,"useful":2,"cool":1} | Xqd0DzHaiyRqVH3WRG7hzg | 15SdjuK7DmYqUAj6rjGowg | 5     | 2007-05-17 | dr. goldberg offers everything i look for in a general practitioner. | review | vcNAWiLM4dR7D2nwwJ7nCA |
+|---------------------------------|------------------------|------------------------|-------|------------|----------------------------------------------------------------------|--------|------------------------|
+</code></pre></div></div>
+
 <h4 id="top-businesses-with-cool-rated-reviews">Top businesses with cool rated reviews</h4>
 
 <p>Note that we are combining the Yelp business data set that has the overall
 review_count to the Yelp review data, which holds additional details on each
 of the reviews themselves.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; Select b.name 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; Select b.name 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` b 
 where b.business_id in (SELECT r.business_id 
 FROM dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_review.json` r
 GROUP BY r.business_id having sum(r.votes.cool) &gt; 2000 
 order by sum(r.votes.cool)  desc);
-+-------------------------------+
+|-------------------------------|
 |             name              |
-+-------------------------------+
+|-------------------------------|
 | Earl of Sandwich              |
 | XS Nightclub                  |
 | The Cosmopolitan of Las Vegas |
 | Wicked Spoon                  |
-+-------------------------------+
-</code></pre></div>
+|-------------------------------|
+</code></pre></div></div>
+
 <h4 id="create-a-view-with-the-combined-business-and-reviews-data-sets">Create a view with the combined business and reviews data sets</h4>
 
 <p>Note that Drill views are lightweight, and can just be created in the local
@@ -1651,24 +1713,28 @@ file system. Drill in standalone mode comes with a dfs.tmp workspace, which we
 can use to create views (or you can can define your own workspaces on a local
 or distributed file system). If you want to persist the data physically
 instead of in a logical view, you can use CREATE TABLE AS syntax.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; create or replace view dfs.tmp.businessreviews as 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; create or replace view dfs.tmp.businessreviews as 
 Select b.name,b.stars,b.state,b.city,r.votes.funny,r.votes.useful,r.votes.cool, r.`date` 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json` b, dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_review.json` r 
 where r.business_id=b.business_id
-+------------+-----------------------------------------------------------------+
+|------------|-----------------------------------------------------------------|
 |     ok     |                           summary                               |
-+------------+-----------------------------------------------------------------+
-| true       | View &#39;businessreviews&#39; created successfully in &#39;dfs.tmp&#39; schema |
-+------------+-----------------------------------------------------------------+
-</code></pre></div>
+|------------|-----------------------------------------------------------------|
+| true       | View 'businessreviews' created successfully in 'dfs.tmp' schema |
+|------------|-----------------------------------------------------------------|
+</code></pre></div></div>
+
 <p>Let’s get the total number of records from the view.</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select count(*) as Total from dfs.tmp.businessreviews;
-+------------+
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select count(*) as Total from dfs.tmp.businessreviews;
+|------------|
 |   Total    |
-+------------+
+|------------|
 | 1125458    |
-+------------+
-</code></pre></div>
+|------------|
+</code></pre></div></div>
+
 <p>In addition to these queries, you can get many deep insights using
 Drill’s <a href="/docs/sql-reference">SQL functionality</a>. If you are not comfortable with writing queries manually, you
 can use a BI/Analytics tools such as Tableau/MicroStrategy to query raw
@@ -1684,20 +1750,21 @@ supporting data with changing schemas in upcoming releases.</p>
 data so you can apply even deeper SQL functionality. Here is a sample query:</p>
 
 <h4 id="get-a-flattened-list-of-categories-for-each-business">Get a flattened list of categories for each business</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select name, flatten(categories) as category 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select name, flatten(categories) as category 
 from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json`  limit 20;
-+-----------------------------+---------------------------------+
+|-----------------------------|---------------------------------|
 | name                        | category                        |
-+-----------------------------+---------------------------------+
+|-----------------------------|---------------------------------|
 | Eric Goldberg, MD           | Doctors                         |
 | Eric Goldberg, MD           | Health &amp; Medical                |
 | Pine Cone Restaurant        | Restaurants                     |
 | Deforest Family Restaurant  | American (Traditional)          |
 | Deforest Family Restaurant  | Restaurants                     |
-| Culver&#39;s                    | Food                            |
-| Culver&#39;s                    | Ice Cream &amp; Frozen Yogurt       |
-| Culver&#39;s                    | Fast Food                       |
-| Culver&#39;s                    | Restaurants                     |
+| Culver's                    | Food                            |
+| Culver's                    | Ice Cream &amp; Frozen Yogurt       |
+| Culver's                    | Fast Food                       |
+| Culver's                    | Restaurants                     |
 | Chang Jiang Chinese Kitchen | Chinese                         |
 | Chang Jiang Chinese Kitchen | Restaurants                     |
 | Charter Communications      | Television Stations             |
@@ -1709,16 +1776,18 @@ from dfs.`/&lt;path-to-yelp-dataset&gt;/yelp/yelp_academic_dataset_business.json
 | Green Lantern Restaurant    | American (Traditional)          |
 | Green Lantern Restaurant    | Restaurants                     |
 | Spartan Animal Hospital     | Veterinarians                   |
-+-----------------------------+---------------------------------+
-</code></pre></div>
+|-----------------------------|---------------------------------|
+</code></pre></div></div>
+
 <h4 id="top-categories-used-in-business-reviews">Top categories used in business reviews</h4>
-<div class="highlight"><pre><code class="language-text" data-lang="text">0: jdbc:drill:zk=local&gt; select celltbl.catl, count(celltbl.catl) categorycnt 
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>0: jdbc:drill:zk=local&gt; select celltbl.catl, count(celltbl.catl) categorycnt 
 from (select flatten(categories) catl from dfs.`/yelp_academic_dataset_business.json` ) celltbl 
 group by celltbl.catl 
 order by count(celltbl.catl) desc limit 10 ;
-+------------------+-------------+
+|------------------|-------------|
 | catl             | categorycnt |
-+------------------+-------------+
+|------------------|-------------|
 | Restaurants      | 14303       |
 | Shopping         | 6428        |
 | Food             | 5209        |
@@ -1729,19 +1798,21 @@ order by count(celltbl.catl) desc limit 10 ;
 | Automotive       | 2241        |
 | Home Services    | 1957        |
 | Fashion          | 1897        |
-+------------------+-------------+
-</code></pre></div>
+|------------------|-------------|
+</code></pre></div></div>
+
 <p>Stay tuned for more features and upcoming activities in the Drill community.</p>
 
 <p>To learn more about Drill, please refer to the following resources:</p>
 
 <ul>
-<li>Download Drill here: <a href="http://getdrill.org/drill/download">http://getdrill.org/drill/download</a></li>
-<li><a href="/docs/why-drill">10 reasons we think Drill is cool</a></li>
-<li><a href="/docs/drill-in-10-minutes">A simple 10-minute tutorial</a></li>
-<li><a href="/docs/tutorials-introduction/">More tutorials</a></li>
+  <li>Download Drill here: <a href="http://getdrill.org/drill/download">http://getdrill.org/drill/download</a></li>
+  <li><a href="/docs/why-drill">10 reasons we think Drill is cool</a></li>
+  <li><a href="/docs/drill-in-10-minutes">A simple 10-minute tutorial</a></li>
+  <li><a href="/docs/tutorials-introduction/">More tutorials</a></li>
 </ul>
 
+
     
       
         <div class="doc-nav">
@@ -1758,7 +1829,7 @@ order by count(celltbl.catl) desc limit 10 ;
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/apache-drill-0-4-0-release-notes/index.html b/docs/apache-drill-0-4-0-release-notes/index.html
index bc88551..a021994 100644
--- a/docs/apache-drill-0-4-0-release-notes/index.html
+++ b/docs/apache-drill-0-4-0-release-notes/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul class="current_section">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,9 +1367,9 @@
 
     </div>
 
-     
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1358,8 +1390,10 @@ to see whether your use case is affected.</p>
 <a href="https://blogs.apache.org/drill/entry/announcing_apache_drill_0_4">0.4.0 announcement blog
 entry</a>.</p>
 
-<p>The release is available as both <a href="http://www.apache.org/dyn/closer.cgi%0A/incubator/drill/drill-0.4.0-incubating/apache-drill-0.4.0-incubating.tar.gz">binary</a>
-and <a href="http://www.apache.org/dyn/closer.cgi/incubator/drill/drill-0.4.0-%0Aincubating/apache-drill-0.4.0-incubating-src.tar.gz">source</a> tarballs. In both cases,
+<p>The release is available as both <a href="http://www.apache.org/dyn/closer.cgi
+/incubator/drill/drill-0.4.0-incubating/apache-drill-0.4.0-incubating.tar.gz">binary</a>
+and <a href="http://www.apache.org/dyn/closer.cgi/incubator/drill/drill-0.4.0-
+incubating/apache-drill-0.4.0-incubating-src.tar.gz">source</a> tarballs. In both cases,
 these are compiled against Apache Hadoop. Drill has also been tested against
 MapR, Cloudera and Hortonworks Hadoop distributions and there are associated
 build profiles or JIRAs that can help you run against your preferred
@@ -1368,16 +1402,17 @@ distribution.</p>
 <p>Some Key Notes &amp; Limitations</p>
 
 <ul>
-<li>The current release supports in memory and beyond memory execution. However, users must disable memory-intensive hash aggregate and hash join operations to leverage this functionality.</li>
-<li>In many cases,merge join operations return incorrect results.</li>
-<li>Use of a local filter in a join “on” clause when using left, right or full outer joins may result in incorrect results.</li>
-<li>Because of known memory leaks and memory overrun issues you may need more memory and you may need to restart the system in some cases.</li>
-<li>Some types of complex expressions, especially those involving empty arrays may fail or return incorrect results.</li>
-<li>While the Drill execution engine supports dynamic schema changes during the course of a query, some operators have yet to implement support for this behavior (such as Sort). Others operations (such as streaming aggregate) may have partial support that leads to unexpected results.</li>
-<li>Protobuf, UDF, query plan interfaces and all interfaces are subject to change in incompatible ways.</li>
-<li>Multiplication of some types of DECIMAL(28+,*) will return incorrect result.</li>
+  <li>The current release supports in memory and beyond memory execution. However, users must disable memory-intensive hash aggregate and hash join operations to leverage this functionality.</li>
+  <li>In many cases, merge join operations return incorrect results.</li>
+  <li>Use of a local filter in a join “on” clause when using left, right or full outer joins may result in incorrect results.</li>
+  <li>Because of known memory leaks and memory overrun issues you may need more memory and you may need to restart the system in some cases.</li>
+  <li>Some types of complex expressions, especially those involving empty arrays may fail or return incorrect results.</li>
+  <li>While the Drill execution engine supports dynamic schema changes during the course of a query, some operators have yet to implement support for this behavior (such as Sort). Other operations (such as streaming aggregate) may have partial support that leads to unexpected results.</li>
+  <li>Protobuf, UDF, query plan interfaces and all interfaces are subject to change in incompatible ways.</li>
+  <li>Multiplication of some types of DECIMAL(28+,*) will return incorrect result.</li>
 </ul>
 
+
     
       
         <div class="doc-nav">
@@ -1394,7 +1429,7 @@ distribution.</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/apache-drill-0-5-0-release-notes/index.html b/docs/apache-drill-0-5-0-release-notes/index.html
index 3f8b665..a3a8ca1 100644
--- a/docs/apache-drill-0-5-0-release-notes/index.html
+++ b/docs/apache-drill-0-5-0-release-notes/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul class="current_section">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,9 +1367,9 @@
 
     </div>
 
-     
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1345,12 +1377,16 @@
 enthusiasts start working and experimenting with Drill. It also continues the
 Drill monthly release cycle as we drive towards general availability.</p>
 
-<p>The 0.5.0 release is primarily a bug fix release, with <a href="h%0Attps://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&versi%0Aon=12324880">more than 100 JIRAs</a> closed, but there are some notable features. For information
+<p>The 0.5.0 release is primarily a bug fix release, with <a href="h
+ttps://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;versi
+on=12324880">more than 100 JIRAs</a> closed, but there are some notable features. For information
 about the features, see the <a href="https://blogs.apache.org/drill/entry/apache_drill_beta_release_see">Apache Drill Blog for the 0.5.0
 release</a>.</p>
 
-<p>This release is available as <a href="http://www.apache.org/dyn/closer.cgi/inc%0Aubator/drill/drill-0.5.0-incubating/apache-drill-0.5.0-incubating.tar.gz">binary</a> and 
-<a href="http://www.apache.org/dyn/closer.cgi/incubator/drill/drill-0.5.0-incu%0Abating/apache-drill-0.5.0-incubating-src.tar.gz">source</a> tarballs that are compiled
+<p>This release is available as <a href="http://www.apache.org/dyn/closer.cgi/inc
+ubator/drill/drill-0.5.0-incubating/apache-drill-0.5.0-incubating.tar.gz">binary</a> and 
+<a href="http://www.apache.org/dyn/closer.cgi/incubator/drill/drill-0.5.0-incu
+bating/apache-drill-0.5.0-incubating-src.tar.gz">source</a> tarballs that are compiled
 against Apache Hadoop. Drill has been tested against MapR, Cloudera, and
 Hortonworks Hadoop distributions. There are associated build profiles and
 JIRAs that can help you run Drill against your preferred distribution.</p>
@@ -1358,11 +1394,12 @@ JIRAs that can help you run Drill against your preferred distribution.</p>
 <p>Apache Drill 0.5.0 Key Notes and Limitations</p>
 
 <ul>
-<li>The current release supports in memory and beyond memory execution. However, you must disable memory-intensive hash aggregate and hash join operations to leverage this functionality.</li>
-<li>While the Drill execution engine supports dynamic schema changes during the course of a query, some operators have yet to implement support for this behavior, such as Sort. Others operations, such as streaming aggregate, may have partial support that leads to unexpected results.</li>
-<li>There are known issues with joining text files without using an intervening view. See <a href="https://issues.apache.org/jira/browse/DRILL-1401">DRILL-1401</a> for more information.</li>
+  <li>The current release supports in memory and beyond memory execution. However, you must disable memory-intensive hash aggregate and hash join operations to leverage this functionality.</li>
+  <li>While the Drill execution engine supports dynamic schema changes during the course of a query, some operators have yet to implement support for this behavior, such as Sort. Other operations, such as streaming aggregate, may have partial support that leads to unexpected results.</li>
+  <li>There are known issues with joining text files without using an intervening view. See <a href="https://issues.apache.org/jira/browse/DRILL-1401">DRILL-1401</a> for more information.</li>
 </ul>
 
+
     
       
         <div class="doc-nav">
@@ -1379,7 +1416,7 @@ JIRAs that can help you run Drill against your preferred distribution.</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/apache-drill-0-6-0-release-notes/index.html b/docs/apache-drill-0-6-0-release-notes/index.html
index 0cc792d..7a1aba6 100644
--- a/docs/apache-drill-0-6-0-release-notes/index.html
+++ b/docs/apache-drill-0-6-0-release-notes/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/json-data-model/">JSON Data Model</a></li>
             
           
@@ -1054,6 +1050,38 @@
               <li class="toctree-l2"><a class="reference internal" href="/docs/sequence-files/">Sequence Files</a></li>
             
           
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/spss-format-plugin/">SPSS Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/esri-shapefile-format-plugin/">ESRI Shapefile Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/excel-format-plugin/">Excel Format Plugin</a></li>
+            
+          
+            
+              <li class="toctree-l2"><a class="reference internal" href="/docs/hdf5-format-plugin/">HDF5 Format Plugin</a></li>
+            
+          
           </ul>
         
       
@@ -1171,6 +1199,10 @@
           <ul class="current_section">
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-18-0-release-notes/">Apache Drill 1.18.0 Release Notes</a></li>
+            
+          
+            
               <li class="toctree-l2"><a class="reference internal" href="/docs/apache-drill-1-17-0-release-notes/">Apache Drill 1.17.0 Release Notes</a></li>
             
           
@@ -1335,9 +1367,9 @@
 
     </div>
 
-     
-
-    <link href="/css/docpage.css" rel="stylesheet" type="text/css">
+    <!-- jt: we don't need to display a last-modified date on each page to users
+     Dec 10, 2020
+    --> 
 
     <div class="int_text" align="left">
       
@@ -1345,30 +1377,34 @@
 enthusiasts start working and experimenting with Drill. It also continues the
 Drill monthly release cycle as we drive towards general availability.</p>
 
-<p>This release is available as <a href="http://www.apache.org/dyn/closer.cgi/inc%0Aubator/drill/drill-0.5.0-incubating/apache-drill-0.5.0-incubating.tar.gz">binary</a> and 
-<a href="http://www.apache.org/dyn/closer.cgi/incubator/drill/drill-0.5.0-incu%0Abating/apache-drill-0.5.0-incubating-src.tar.gz">source</a> tarballs that are compiled
+<p>This release is available as <a href="http://www.apache.org/dyn/closer.cgi/inc
+ubator/drill/drill-0.5.0-incubating/apache-drill-0.5.0-incubating.tar.gz">binary</a> and 
+<a href="http://www.apache.org/dyn/closer.cgi/incubator/drill/drill-0.5.0-incu
+bating/apache-drill-0.5.0-incubating-src.tar.gz">source</a> tarballs that are compiled
 against Apache Hadoop. Drill has been tested against MapR, Cloudera, and
 Hortonworks Hadoop distributions. There are associated build profiles and
 JIRAs that can help you run Drill against your preferred distribution.</p>
 
 <p>Apache Drill 0.6.0 Key Features</p>
 
-<p>This release is primarily a bug fix release, with <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&vers%0Aion=12327472">more than 30 JIRAs closed</a>, but there are some notable features:</p>
+<p>This release is primarily a bug fix release, with <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&amp;vers
+ion=12327472">more than 30 JIRAs closed</a>, but there are some notable features:</p>
 
 <ul>
-<li>Direct ANSI SQL access to MongoDB, using the latest <a href="/docs/mongodb-storage-plugin">MongoDB Plugin for Apache Drill</a></li>
-<li>Filesystem query performance improvements with partition pruning</li>
-<li>Ability to use the file system as a persistent store for query profiles and diagnostic information</li>
-<li>Window function support (alpha)</li>
+  <li>Direct ANSI SQL access to MongoDB, using the latest <a href="/docs/mongodb-storage-plugin">MongoDB Plugin for Apache Drill</a></li>
+  <li>Filesystem query performance improvements with partition pruning</li>
+  <li>Ability to use the file system as a persistent store for query profiles and diagnostic information</li>
+  <li>Window function support (alpha)</li>
 </ul>
 
 <p>Apache Drill 0.6.0 Key Notes and Limitations</p>
 
 <ul>
-<li>The current release supports in-memory and beyond-memory execution. However, you must disable memory-intensive hash aggregate and hash join operations to leverage this functionality.</li>
-<li>While the Drill execution engine supports dynamic schema changes during the course of a query, some operators have yet to implement support for this behavior, such as Sort. Other operations, such as streaming aggregate, may have partial support that leads to unexpected results.</li>
+  <li>The current release supports in-memory and beyond-memory execution. However, you must disable memory-intensive hash aggregate and hash join operations to leverage this functionality.</li>
+  <li>While the Drill execution engine supports dynamic schema changes during the course of a query, some operators have yet to implement support for this behavior, such as Sort. Other operations, such as streaming aggregate, may have partial support that leads to unexpected results.</li>
 </ul>
 
+
     
       
         <div class="doc-nav">
@@ -1385,7 +1421,7 @@ JIRAs that can help you run Drill against your preferred distribution.</p>
   <p class="push"></p>
 <div id="footer" class="mw">
 <div class="wrapper">
-Copyright © 2012-2014 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
+Copyright © 2012-2020 The Apache Software Foundation, licensed under the Apache License, Version 2.0.<br>
 Apache and the Apache feather logo are trademarks of The Apache Software Foundation. Other names appearing on the site may be trademarks of their respective owners.<br/><br/>
 </div>
 </div>
diff --git a/docs/apache-drill-0-7-0-release-notes/index.html b/docs/apache-drill-0-7-0-release-notes/index.html
index 5c03d53..7188208 100644
--- a/docs/apache-drill-0-7-0-release-notes/index.html
+++ b/docs/apache-drill-0-7-0-release-notes/index.html
@@ -294,8 +294,6 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/embedded-mode-prerequisites/">Embedded Mode Prerequisites</a></li>
               
-                <li class="toctree-l3"><a class="reference internal" href="/docs/running-drill-on-docker/">Running Drill on Docker</a></li>
-              
                 <li class="toctree-l3"><a class="reference internal" href="/docs/installing-drill-on-linux-and-mac-os-x/">Installing Drill on Linux and Mac OS X</a></li>
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/starting-drill-on-linux-and-mac-os-x/">Starting Drill on Linux and Mac OS X</a></li>
@@ -459,10 +457,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/hbase-storage-plugin/">HBase Storage Plugin</a></li>
             
           
@@ -487,10 +481,6 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/httpd-format-plugin/">HTTPD Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/opentsdb-storage-plugin/">OpenTSDB Storage Plugin</a></li>
             
           
@@ -499,19 +489,15 @@
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/image-metadata-format-plugin/">Image Metadata Format Plugin</a></li>
-            
-          
-            
               <li class="toctree-l2"><a class="reference internal" href="/docs/azure-blob-storage-plugin/">Azure Blob Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/syslog-format-plugin/">Syslog Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/druid-storage-plugin/">Druid Storage Plugin</a></li>
             
           
             
-              <li class="toctree-l2"><a class="reference internal" href="/docs/ltsv-format-plugin/">LTSV Format Plugin</a></li>
+              <li class="toctree-l2"><a class="reference internal" href="/docs/http-storage-plugin/">HTTP Storage Plugin</a></li>
             
           
           </ul>
@@ -886,6 +872,12 @@
               
                 <li class="toctree-l3"><a class="reference internal" href="/docs/cryptography-functions/">Cryptography Functions</a></li>
               
+                <li class="toctree-l3"><a class="reference internal" href="/docs/sql-dialect-compatibility-functions/">SQL dialect compatibility functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/gis-functions/">GIS functions</a></li>
+              
+                <li class="toctree-l3"><a class="reference internal" href="/docs/time-series-analysis-functions/">Time Series Analysis Functions</a></li>
+              
             </ul>
             
           
@@ -1043,6 +1035,10 @@
             
           
             
+              <li class="toctree-l2"><a class="reference internal" href="/docs/logfile-plugin/">Logfile Plugin</a></li>
... 137227 lines suppressed ...