You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@drill.apache.org by ts...@apache.org on 2015/05/19 01:36:24 UTC

[01/31] drill git commit: new section configuring drill shell

Repository: drill
Updated Branches:
  refs/heads/gh-pages 1b7072c5d -> 3237426be


new section configuring drill shell


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/6c87ff04
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/6c87ff04
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/6c87ff04

Branch: refs/heads/gh-pages
Commit: 6c87ff04e06cccd0b80f03fc4b0deb4c43c373ee
Parents: 6ea0c7a
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 11:19:22 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 11:19:22 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 71 ++++++++++++++---
 .../120-configuring-the-drill-shell.md          | 82 ++++++++++++++++++++
 2 files changed, 143 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/6c87ff04/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 2bf92ef..9813e92 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -1106,14 +1106,31 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Connect a Data Source", 
-                    "next_url": "/docs/connect-a-data-source/", 
+                    "next_title": "Configuring the Drill Shell", 
+                    "next_url": "/docs/configuring-the-drill-shell/", 
                     "parent": "Configure Drill", 
                     "previous_title": "Ports Used by Drill", 
                     "previous_url": "/docs/ports-used-by-drill/", 
                     "relative_path": "_docs/configure-drill/110-partition-pruning.md", 
                     "title": "Partition Pruning", 
                     "url": "/docs/partition-pruning/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Configure Drill", 
+                            "url": "/docs/configure-drill/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Connect a Data Source", 
+                    "next_url": "/docs/connect-a-data-source/", 
+                    "parent": "Configure Drill", 
+                    "previous_title": "Partition Pruning", 
+                    "previous_url": "/docs/partition-pruning/", 
+                    "relative_path": "_docs/configure-drill/120-configuring-the-drill-shell.md", 
+                    "title": "Configuring the Drill Shell", 
+                    "url": "/docs/configuring-the-drill-shell/"
                 }
             ], 
             "next_title": "Configure Drill Introduction", 
@@ -1396,6 +1413,23 @@
             "title": "Configuring a Multitenant Cluster Introduction", 
             "url": "/docs/configuring-a-multitenant-cluster-introduction/"
         }, 
+        "Configuring the Drill Shell": {
+            "breadcrumbs": [
+                {
+                    "title": "Configure Drill", 
+                    "url": "/docs/configure-drill/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Connect a Data Source", 
+            "next_url": "/docs/connect-a-data-source/", 
+            "parent": "Configure Drill", 
+            "previous_title": "Partition Pruning", 
+            "previous_url": "/docs/partition-pruning/", 
+            "relative_path": "_docs/configure-drill/120-configuring-the-drill-shell.md", 
+            "title": "Configuring the Drill Shell", 
+            "url": "/docs/configuring-the-drill-shell/"
+        }, 
         "Connect a Data Source": {
             "breadcrumbs": [], 
             "children": [
@@ -1615,8 +1649,8 @@
             "next_title": "Connect a Data Source Introduction", 
             "next_url": "/docs/connect-a-data-source-introduction/", 
             "parent": "", 
-            "previous_title": "Partition Pruning", 
-            "previous_url": "/docs/partition-pruning/", 
+            "previous_title": "Configuring the Drill Shell", 
+            "previous_url": "/docs/configuring-the-drill-shell/", 
             "relative_path": "_docs/050-connect-a-data-source.md", 
             "title": "Connect a Data Source", 
             "url": "/docs/connect-a-data-source/"
@@ -4741,8 +4775,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Connect a Data Source", 
-            "next_url": "/docs/connect-a-data-source/", 
+            "next_title": "Configuring the Drill Shell", 
+            "next_url": "/docs/configuring-the-drill-shell/", 
             "parent": "Configure Drill", 
             "previous_title": "Ports Used by Drill", 
             "previous_url": "/docs/ports-used-by-drill/", 
@@ -9775,14 +9809,31 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Connect a Data Source", 
-                    "next_url": "/docs/connect-a-data-source/", 
+                    "next_title": "Configuring the Drill Shell", 
+                    "next_url": "/docs/configuring-the-drill-shell/", 
                     "parent": "Configure Drill", 
                     "previous_title": "Ports Used by Drill", 
                     "previous_url": "/docs/ports-used-by-drill/", 
                     "relative_path": "_docs/configure-drill/110-partition-pruning.md", 
                     "title": "Partition Pruning", 
                     "url": "/docs/partition-pruning/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Configure Drill", 
+                            "url": "/docs/configure-drill/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Connect a Data Source", 
+                    "next_url": "/docs/connect-a-data-source/", 
+                    "parent": "Configure Drill", 
+                    "previous_title": "Partition Pruning", 
+                    "previous_url": "/docs/partition-pruning/", 
+                    "relative_path": "_docs/configure-drill/120-configuring-the-drill-shell.md", 
+                    "title": "Configuring the Drill Shell", 
+                    "url": "/docs/configuring-the-drill-shell/"
                 }
             ], 
             "next_title": "Configure Drill Introduction", 
@@ -10013,8 +10064,8 @@
             "next_title": "Connect a Data Source Introduction", 
             "next_url": "/docs/connect-a-data-source-introduction/", 
             "parent": "", 
-            "previous_title": "Partition Pruning", 
-            "previous_url": "/docs/partition-pruning/", 
+            "previous_title": "Configuring the Drill Shell", 
+            "previous_url": "/docs/configuring-the-drill-shell/", 
             "relative_path": "_docs/050-connect-a-data-source.md", 
             "title": "Connect a Data Source", 
             "url": "/docs/connect-a-data-source/"

http://git-wip-us.apache.org/repos/asf/drill/blob/6c87ff04/_docs/configure-drill/120-configuring-the-drill-shell.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/120-configuring-the-drill-shell.md b/_docs/configure-drill/120-configuring-the-drill-shell.md
new file mode 100644
index 0000000..b9ff103
--- /dev/null
+++ b/_docs/configure-drill/120-configuring-the-drill-shell.md
@@ -0,0 +1,82 @@
+---
+title: "Configuring the Drill Shell"
+parent: "Configure Drill"
+---
+At the Drill shell command prompt, typing "help" lists the configuration and other options you can set to manage shell functionality. Apache Drill 1.0 formats the result set output tables for readability where possible. In this release, columns having 70 characters or more cannot be formatted. For readability and example purposes, all output in this document is formatted.
+
+Formatting tables takes time, which you might notice when running a huge query using the Drill shell's default `outputFormat` setting, which is `table`. You can set another, faster output format, such as `csv`, as shown in the [examples]({{site.baseurl}}/docs/configuring-the-drill-shell/#examples-of-configuring-the-drill-shell). 
+
+
+## Drill Shell Commands
+
+The following table lists the commands that you can run on the Drill command line.
+
+| Command       | Description                                                                                             |
+|---------------|---------------------------------------------------------------------------------------------------------|
+| !brief        | Set verbose mode off.                                                                                   |
+| !close        | Close the current connection to the database.                                                           |
+| !closeall     | Close all current open connections.                                                                     |
+| !connect      | Open a new connection to the database.                                                                  |
+| !help         | Print a summary of command usage.                                                                       |
+| !history      | Display the command history.                                                                            |
+| !list         | List the current connections.                                                                           |
+| !outputformat | Set the output format for displaying results.                                                           |
+| !properties   | Connect to the database specified in the properties file(s).                                            |
+| !quit         | Exit the Drill shell.                                                                                   |
+| !reconnect    | Reconnect to the database.                                                                              |
+| !record       | Record all output to the specified file.                                                                |
+| !run          | Run a script from the specified file.                                                                   |
+| !save         | Save the current variables and aliases.                                                                 |
+| !script       | Start saving a script to a file.                                                                        |
+| !set          | Set a [sqlline variable]({{site.baseurl}}/docs/configuring-the-drill-shell/#the-set-command-variables). |
+| !tables       | List all the tables in the database.                                                                    |
+| !verbose      | Show unabbreviated error messages.                                                                      |
+
+## Examples of Configuring the Drill Shell
+
+For example, quit the Drill shell:
+
+    0: jdbc:drill:zk=local> !quit
+
+List the current connections. 
+
+    0: jdbc:drill:zk=local> !list
+    1 active connection:
+     #0  open     jdbc:drill:zk=local
+
+Set the maximum width of the Drill shell to 10000.
+
+     0: jdbc:drill:zk=local> !set maxwidth 10000
+
+Set the output format to CSV to improve performance of a huge query.
+
+     0: jdbc:drill:zk=local> !set outputFormat csv
+
+## The Set Command Variables
+
+| Variable Name   | Valid Variable Values  | Description                                                                                                                                                          |
+|-----------------|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| autoCommit      | true/false             | Enable/disable automatic transaction commit.                                                                                                                         |
+| autoSave        | true/false             | Automatically save preferences.                                                                                                                                      |
+| color           | true/false             | Control whether color is used for display.                                                                                                                           |
+| fastConnect     | true/false             | Skip building table/column list for tab-completion.                                                                                                                  |
+| force           | true/false             | Continue running script even after errors.                                                                                                                           |
+| headerInterval  | \<integer\>            | The interval between which headers are displayed.                                                                                                                    |
+| historyFile     | \<path\>               | File in which to save command history. Default is $HOME/.sqlline/history (UNIX, Linux, Mac OS), $HOME/sqlline/history (Windows).                                     |
+| incremental     | true/false             | Do not receive all rows from server before printing the first row. Uses fewer resources, especially for long-running queries, but column widths may be incorrect.    |
+| isolation       | \<level\>              | Set transaction isolation level.                                                                                                                                     |
+| maxColumnWidth  | \<integer\>            | Maximum width for displaying columns.                                                                                                                                |
+| maxHeight       | \<integer\>            | Maximum height of the terminal.                                                                                                                                      |
+| maxWidth        | \<integer\>            | Maximum width of the terminal.                                                                                                                                       |
+| numberFormat    | \<pattern\>            | Format numbers using DecimalFormat pattern.                                                                                                                          |
+| outputFormat    | table/vertical/csv/tsv | Format mode for result display.                                                                                                                                      |
+| properties      | \<path\>               | File from which SqlLine reads properties on startup. Default is $HOME/.sqlline/sqlline.properties (UNIX, Linux, Mac OS), $HOME/sqlline/sqlline.properties (Windows). |
+| rowLimit        | \<integer\>            | Maximum number of rows returned from a query; zero means no limit.                                                                                                   |
+| showElapsedTime | true/false             | Display execution time when verbose.                                                                                                                                 |
+| showHeader      | true/false             | Show column names in query results.                                                                                                                                  |
+| showNestedErrs  | true/false             | Display nested errors.                                                                                                                                               |
+| showWarnings    | true/false             | Display connection warnings.                                                                                                                                         |
+| silent          | true/false             | Disable or enable showing information specified by show commands.                                                                                                    |
+| timeout         | \<integer\>            | Query timeout in seconds; less than zero means no timeout.                                                                                                           |
+| trimScripts     | true/false             | Remove trailing spaces from lines read from script files.                                                                                                            |
+| verbose         | true/false             | Show unabbreviated error messages and debug info.                                                                                                                    |
\ No newline at end of file


[08/31] drill git commit: add perf tune structure and links, drillbit_queries.json, remove videos

Posted by ts...@apache.org.
add perf tune structure and links, drillbit_queries.json, remove videos


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/cd0a0e96
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/cd0a0e96
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/cd0a0e96

Branch: refs/heads/gh-pages
Commit: cd0a0e96870e942bf5d1465738502efa9a3aecd5
Parents: 99af7ad
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 17:08:53 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 17:08:53 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 440 ++++++++-----------
 _docs/072-performance-tuning.md                 |   5 +
 _docs/075-getting-query-information.md          |   5 +-
 .../010-architecture-introduction.md            |  45 +-
 .../030-architectural-highlights.md             |   5 -
 _docs/architecture/030-performance.md           |  55 +++
 .../architectural-highlights/010-flexibility.md |  84 ----
 .../architectural-highlights/020-performance.md |  55 ---
 _docs/archived-pages/030-partition-pruning.md   |  75 ++++
 .../050-configuring-multitenant-resources.md    |   2 +-
 .../060-configuring-a-shared-drillbit.md        |   2 +-
 _docs/configure-drill/110-partition-pruning.md  |  75 ----
 .../120-configuring-the-drill-shell.md          |  92 ++--
 .../090-mongodb-plugin-for-apache-drill.md      |  50 +--
 _docs/getting-started/010-drill-introduction.md |  33 +-
 _docs/getting-started/020-why-drill.md          |  36 +-
 .../010-performance-tuning-introduction.md      |  17 +
 17 files changed, 467 insertions(+), 609 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 9813e92..6f8cde1 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -389,66 +389,6 @@
             "title": "Apache Drill M1 Release Notes (Apache Drill Alpha)", 
             "url": "/docs/apache-drill-m1-release-notes-apache-drill-alpha/"
         }, 
-        "Architectural Highlights": {
-            "breadcrumbs": [
-                {
-                    "title": "Architecture", 
-                    "url": "/docs/architecture/"
-                }
-            ], 
-            "children": [
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Architectural Highlights", 
-                            "url": "/docs/architectural-highlights/"
-                        }, 
-                        {
-                            "title": "Architecture", 
-                            "url": "/docs/architecture/"
-                        }
-                    ], 
-                    "children": [], 
-                    "next_title": "Performance", 
-                    "next_url": "/docs/performance/", 
-                    "parent": "Architectural Highlights", 
-                    "previous_title": "Architectural Highlights", 
-                    "previous_url": "/docs/architectural-highlights/", 
-                    "relative_path": "_docs/architecture/architectural-highlights/010-flexibility.md", 
-                    "title": "Flexibility", 
-                    "url": "/docs/flexibility/"
-                }, 
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Architectural Highlights", 
-                            "url": "/docs/architectural-highlights/"
-                        }, 
-                        {
-                            "title": "Architecture", 
-                            "url": "/docs/architecture/"
-                        }
-                    ], 
-                    "children": [], 
-                    "next_title": "Tutorials", 
-                    "next_url": "/docs/tutorials/", 
-                    "parent": "Architectural Highlights", 
-                    "previous_title": "Flexibility", 
-                    "previous_url": "/docs/flexibility/", 
-                    "relative_path": "_docs/architecture/architectural-highlights/020-performance.md", 
-                    "title": "Performance", 
-                    "url": "/docs/performance/"
-                }
-            ], 
-            "next_title": "Flexibility", 
-            "next_url": "/docs/flexibility/", 
-            "parent": "Architecture", 
-            "previous_title": "Core Modules", 
-            "previous_url": "/docs/core-modules/", 
-            "relative_path": "_docs/architecture/030-architectural-highlights.md", 
-            "title": "Architectural Highlights", 
-            "url": "/docs/architectural-highlights/"
-        }, 
         "Architecture": {
             "breadcrumbs": [], 
             "children": [
@@ -477,8 +417,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Architectural Highlights", 
-                    "next_url": "/docs/architectural-highlights/", 
+                    "next_title": "Performance", 
+                    "next_url": "/docs/performance/", 
                     "parent": "Architecture", 
                     "previous_title": "Architecture Introduction", 
                     "previous_url": "/docs/architecture-introduction/", 
@@ -493,58 +433,15 @@
                             "url": "/docs/architecture/"
                         }
                     ], 
-                    "children": [
-                        {
-                            "breadcrumbs": [
-                                {
-                                    "title": "Architectural Highlights", 
-                                    "url": "/docs/architectural-highlights/"
-                                }, 
-                                {
-                                    "title": "Architecture", 
-                                    "url": "/docs/architecture/"
-                                }
-                            ], 
-                            "children": [], 
-                            "next_title": "Performance", 
-                            "next_url": "/docs/performance/", 
-                            "parent": "Architectural Highlights", 
-                            "previous_title": "Architectural Highlights", 
-                            "previous_url": "/docs/architectural-highlights/", 
-                            "relative_path": "_docs/architecture/architectural-highlights/010-flexibility.md", 
-                            "title": "Flexibility", 
-                            "url": "/docs/flexibility/"
-                        }, 
-                        {
-                            "breadcrumbs": [
-                                {
-                                    "title": "Architectural Highlights", 
-                                    "url": "/docs/architectural-highlights/"
-                                }, 
-                                {
-                                    "title": "Architecture", 
-                                    "url": "/docs/architecture/"
-                                }
-                            ], 
-                            "children": [], 
-                            "next_title": "Tutorials", 
-                            "next_url": "/docs/tutorials/", 
-                            "parent": "Architectural Highlights", 
-                            "previous_title": "Flexibility", 
-                            "previous_url": "/docs/flexibility/", 
-                            "relative_path": "_docs/architecture/architectural-highlights/020-performance.md", 
-                            "title": "Performance", 
-                            "url": "/docs/performance/"
-                        }
-                    ], 
-                    "next_title": "Flexibility", 
-                    "next_url": "/docs/flexibility/", 
+                    "children": [], 
+                    "next_title": "Tutorials", 
+                    "next_url": "/docs/tutorials/", 
                     "parent": "Architecture", 
                     "previous_title": "Core Modules", 
                     "previous_url": "/docs/core-modules/", 
-                    "relative_path": "_docs/architecture/030-architectural-highlights.md", 
-                    "title": "Architectural Highlights", 
-                    "url": "/docs/architectural-highlights/"
+                    "relative_path": "_docs/architecture/030-performance.md", 
+                    "title": "Performance", 
+                    "url": "/docs/performance/"
                 }
             ], 
             "next_title": "Architecture Introduction", 
@@ -601,14 +498,31 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Progress Reports", 
-                    "next_url": "/docs/progress-reports/", 
+                    "next_title": "Partition Pruning", 
+                    "next_url": "/docs/partition-pruning/", 
                     "parent": "Archived Pages", 
                     "previous_title": "How to Run the Drill Demo", 
                     "previous_url": "/docs/how-to-run-the-drill-demo/", 
                     "relative_path": "_docs/archived-pages/020-what-is-apache-drill.md", 
                     "title": "What is Apache Drill", 
                     "url": "/docs/what-is-apache-drill/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Archived Pages", 
+                            "url": "/docs/archived-pages/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Progress Reports", 
+                    "next_url": "/docs/progress-reports/", 
+                    "parent": "Archived Pages", 
+                    "previous_title": "What is Apache Drill", 
+                    "previous_url": "/docs/what-is-apache-drill/", 
+                    "relative_path": "_docs/archived-pages/030-partition-pruning.md", 
+                    "title": "Partition Pruning", 
+                    "url": "/docs/partition-pruning/"
                 }
             ], 
             "next_title": "How to Run the Drill Demo", 
@@ -1089,8 +1003,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Partition Pruning", 
-                    "next_url": "/docs/partition-pruning/", 
+                    "next_title": "Configuring the Drill Shell", 
+                    "next_url": "/docs/configuring-the-drill-shell/", 
                     "parent": "Configure Drill", 
                     "previous_title": "Persistent Configuration Storage", 
                     "previous_url": "/docs/persistent-configuration-storage/", 
@@ -1106,28 +1020,11 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Configuring the Drill Shell", 
-                    "next_url": "/docs/configuring-the-drill-shell/", 
-                    "parent": "Configure Drill", 
-                    "previous_title": "Ports Used by Drill", 
-                    "previous_url": "/docs/ports-used-by-drill/", 
-                    "relative_path": "_docs/configure-drill/110-partition-pruning.md", 
-                    "title": "Partition Pruning", 
-                    "url": "/docs/partition-pruning/"
-                }, 
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Configure Drill", 
-                            "url": "/docs/configure-drill/"
-                        }
-                    ], 
-                    "children": [], 
                     "next_title": "Connect a Data Source", 
                     "next_url": "/docs/connect-a-data-source/", 
                     "parent": "Configure Drill", 
-                    "previous_title": "Partition Pruning", 
-                    "previous_url": "/docs/partition-pruning/", 
+                    "previous_title": "Ports Used by Drill", 
+                    "previous_url": "/docs/ports-used-by-drill/", 
                     "relative_path": "_docs/configure-drill/120-configuring-the-drill-shell.md", 
                     "title": "Configuring the Drill Shell", 
                     "url": "/docs/configuring-the-drill-shell/"
@@ -1424,8 +1321,8 @@
             "next_title": "Connect a Data Source", 
             "next_url": "/docs/connect-a-data-source/", 
             "parent": "Configure Drill", 
-            "previous_title": "Partition Pruning", 
-            "previous_url": "/docs/partition-pruning/", 
+            "previous_title": "Ports Used by Drill", 
+            "previous_url": "/docs/ports-used-by-drill/", 
             "relative_path": "_docs/configure-drill/120-configuring-the-drill-shell.md", 
             "title": "Configuring the Drill Shell", 
             "url": "/docs/configuring-the-drill-shell/"
@@ -1761,8 +1658,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Architectural Highlights", 
-            "next_url": "/docs/architectural-highlights/", 
+            "next_title": "Performance", 
+            "next_url": "/docs/performance/", 
             "parent": "Architecture", 
             "previous_title": "Architecture Introduction", 
             "previous_url": "/docs/architecture-introduction/", 
@@ -2929,27 +2826,6 @@
             "title": "File System Storage Plugin", 
             "url": "/docs/file-system-storage-plugin/"
         }, 
-        "Flexibility": {
-            "breadcrumbs": [
-                {
-                    "title": "Architectural Highlights", 
-                    "url": "/docs/architectural-highlights/"
-                }, 
-                {
-                    "title": "Architecture", 
-                    "url": "/docs/architecture/"
-                }
-            ], 
-            "children": [], 
-            "next_title": "Performance", 
-            "next_url": "/docs/performance/", 
-            "parent": "Architectural Highlights", 
-            "previous_title": "Architectural Highlights", 
-            "previous_url": "/docs/architectural-highlights/", 
-            "relative_path": "_docs/architecture/architectural-highlights/010-flexibility.md", 
-            "title": "Flexibility", 
-            "url": "/docs/flexibility/"
-        }, 
         "Functions for Handling Nulls": {
             "breadcrumbs": [
                 {
@@ -4118,8 +3994,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Query Audit Logging", 
-            "next_url": "/docs/query-audit-logging/", 
+            "next_title": "Performance Tuning", 
+            "next_url": "/docs/performance-tuning/", 
             "parent": "Query Data", 
             "previous_title": "Querying System Tables", 
             "previous_url": "/docs/querying-system-tables/", 
@@ -4770,27 +4646,23 @@
         "Partition Pruning": {
             "breadcrumbs": [
                 {
-                    "title": "Configure Drill", 
-                    "url": "/docs/configure-drill/"
+                    "title": "Archived Pages", 
+                    "url": "/docs/archived-pages/"
                 }
             ], 
             "children": [], 
-            "next_title": "Configuring the Drill Shell", 
-            "next_url": "/docs/configuring-the-drill-shell/", 
-            "parent": "Configure Drill", 
-            "previous_title": "Ports Used by Drill", 
-            "previous_url": "/docs/ports-used-by-drill/", 
-            "relative_path": "_docs/configure-drill/110-partition-pruning.md", 
+            "next_title": "Progress Reports", 
+            "next_url": "/docs/progress-reports/", 
+            "parent": "Archived Pages", 
+            "previous_title": "What is Apache Drill", 
+            "previous_url": "/docs/what-is-apache-drill/", 
+            "relative_path": "_docs/archived-pages/030-partition-pruning.md", 
             "title": "Partition Pruning", 
             "url": "/docs/partition-pruning/"
         }, 
         "Performance": {
             "breadcrumbs": [
                 {
-                    "title": "Architectural Highlights", 
-                    "url": "/docs/architectural-highlights/"
-                }, 
-                {
                     "title": "Architecture", 
                     "url": "/docs/architecture/"
                 }
@@ -4798,13 +4670,60 @@
             "children": [], 
             "next_title": "Tutorials", 
             "next_url": "/docs/tutorials/", 
-            "parent": "Architectural Highlights", 
-            "previous_title": "Flexibility", 
-            "previous_url": "/docs/flexibility/", 
-            "relative_path": "_docs/architecture/architectural-highlights/020-performance.md", 
+            "parent": "Architecture", 
+            "previous_title": "Core Modules", 
+            "previous_url": "/docs/core-modules/", 
+            "relative_path": "_docs/architecture/030-performance.md", 
             "title": "Performance", 
             "url": "/docs/performance/"
         }, 
+        "Performance Tuning": {
+            "breadcrumbs": [], 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Performance Tuning", 
+                            "url": "/docs/performance-tuning/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Query Audit Logging", 
+                    "next_url": "/docs/query-audit-logging/", 
+                    "parent": "Performance Tuning", 
+                    "previous_title": "Performance Tuning", 
+                    "previous_url": "/docs/performance-tuning/", 
+                    "relative_path": "_docs/performance-tuning/010-performance-tuning-introduction.md", 
+                    "title": "Performance Tuning Introduction", 
+                    "url": "/docs/performance-tuning-introduction/"
+                }
+            ], 
+            "next_title": "Performance Tuning Introduction", 
+            "next_url": "/docs/performance-tuning-introduction/", 
+            "parent": "", 
+            "previous_title": "Monitoring and Canceling Queries in the Drill Web UI", 
+            "previous_url": "/docs/monitoring-and-canceling-queries-in-the-drill-web-ui/", 
+            "relative_path": "_docs/072-performance-tuning.md", 
+            "title": "Performance Tuning", 
+            "url": "/docs/performance-tuning/"
+        }, 
+        "Performance Tuning Introduction": {
+            "breadcrumbs": [
+                {
+                    "title": "Performance Tuning", 
+                    "url": "/docs/performance-tuning/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Query Audit Logging", 
+            "next_url": "/docs/query-audit-logging/", 
+            "parent": "Performance Tuning", 
+            "previous_title": "Performance Tuning", 
+            "previous_url": "/docs/performance-tuning/", 
+            "relative_path": "_docs/performance-tuning/010-performance-tuning-introduction.md", 
+            "title": "Performance Tuning Introduction", 
+            "url": "/docs/performance-tuning-introduction/"
+        }, 
         "Persistent Configuration Storage": {
             "breadcrumbs": [
                 {
@@ -4876,8 +4795,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Partition Pruning", 
-            "next_url": "/docs/partition-pruning/", 
+            "next_title": "Configuring the Drill Shell", 
+            "next_url": "/docs/configuring-the-drill-shell/", 
             "parent": "Configure Drill", 
             "previous_title": "Persistent Configuration Storage", 
             "previous_url": "/docs/persistent-configuration-storage/", 
@@ -4909,8 +4828,8 @@
             "next_title": "2014 Q1 Drill Report", 
             "next_url": "/docs/2014-q1-drill-report/", 
             "parent": "", 
-            "previous_title": "What is Apache Drill", 
-            "previous_url": "/docs/what-is-apache-drill/", 
+            "previous_title": "Partition Pruning", 
+            "previous_url": "/docs/partition-pruning/", 
             "relative_path": "_docs/160-progress-reports.md", 
             "title": "Progress Reports", 
             "url": "/docs/progress-reports/"
@@ -4951,8 +4870,8 @@
             "next_title": "Getting Query Information", 
             "next_url": "/docs/getting-query-information/", 
             "parent": "", 
-            "previous_title": "Monitoring and Canceling Queries in the Drill Web UI", 
-            "previous_url": "/docs/monitoring-and-canceling-queries-in-the-drill-web-ui/", 
+            "previous_title": "Performance Tuning Introduction", 
+            "previous_url": "/docs/performance-tuning-introduction/", 
             "relative_path": "_docs/074-query-audit-logging.md", 
             "title": "Query Audit Logging", 
             "url": "/docs/query-audit-logging/"
@@ -5320,8 +5239,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Query Audit Logging", 
-                    "next_url": "/docs/query-audit-logging/", 
+                    "next_title": "Performance Tuning", 
+                    "next_url": "/docs/performance-tuning/", 
                     "parent": "Query Data", 
                     "previous_title": "Querying System Tables", 
                     "previous_url": "/docs/querying-system-tables/", 
@@ -8813,8 +8732,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Progress Reports", 
-            "next_url": "/docs/progress-reports/", 
+            "next_title": "Partition Pruning", 
+            "next_url": "/docs/partition-pruning/", 
             "parent": "Archived Pages", 
             "previous_title": "How to Run the Drill Demo", 
             "previous_url": "/docs/how-to-run-the-drill-demo/", 
@@ -8954,8 +8873,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Architectural Highlights", 
-                    "next_url": "/docs/architectural-highlights/", 
+                    "next_title": "Performance", 
+                    "next_url": "/docs/performance/", 
                     "parent": "Architecture", 
                     "previous_title": "Architecture Introduction", 
                     "previous_url": "/docs/architecture-introduction/", 
@@ -8970,58 +8889,15 @@
                             "url": "/docs/architecture/"
                         }
                     ], 
-                    "children": [
-                        {
-                            "breadcrumbs": [
-                                {
-                                    "title": "Architectural Highlights", 
-                                    "url": "/docs/architectural-highlights/"
-                                }, 
-                                {
-                                    "title": "Architecture", 
-                                    "url": "/docs/architecture/"
-                                }
-                            ], 
-                            "children": [], 
-                            "next_title": "Performance", 
-                            "next_url": "/docs/performance/", 
-                            "parent": "Architectural Highlights", 
-                            "previous_title": "Architectural Highlights", 
-                            "previous_url": "/docs/architectural-highlights/", 
-                            "relative_path": "_docs/architecture/architectural-highlights/010-flexibility.md", 
-                            "title": "Flexibility", 
-                            "url": "/docs/flexibility/"
-                        }, 
-                        {
-                            "breadcrumbs": [
-                                {
-                                    "title": "Architectural Highlights", 
-                                    "url": "/docs/architectural-highlights/"
-                                }, 
-                                {
-                                    "title": "Architecture", 
-                                    "url": "/docs/architecture/"
-                                }
-                            ], 
-                            "children": [], 
-                            "next_title": "Tutorials", 
-                            "next_url": "/docs/tutorials/", 
-                            "parent": "Architectural Highlights", 
-                            "previous_title": "Flexibility", 
-                            "previous_url": "/docs/flexibility/", 
-                            "relative_path": "_docs/architecture/architectural-highlights/020-performance.md", 
-                            "title": "Performance", 
-                            "url": "/docs/performance/"
-                        }
-                    ], 
-                    "next_title": "Flexibility", 
-                    "next_url": "/docs/flexibility/", 
+                    "children": [], 
+                    "next_title": "Tutorials", 
+                    "next_url": "/docs/tutorials/", 
                     "parent": "Architecture", 
                     "previous_title": "Core Modules", 
                     "previous_url": "/docs/core-modules/", 
-                    "relative_path": "_docs/architecture/030-architectural-highlights.md", 
-                    "title": "Architectural Highlights", 
-                    "url": "/docs/architectural-highlights/"
+                    "relative_path": "_docs/architecture/030-performance.md", 
+                    "title": "Performance", 
+                    "url": "/docs/performance/"
                 }
             ], 
             "next_title": "Architecture Introduction", 
@@ -9792,8 +9668,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Partition Pruning", 
-                    "next_url": "/docs/partition-pruning/", 
+                    "next_title": "Configuring the Drill Shell", 
+                    "next_url": "/docs/configuring-the-drill-shell/", 
                     "parent": "Configure Drill", 
                     "previous_title": "Persistent Configuration Storage", 
                     "previous_url": "/docs/persistent-configuration-storage/", 
@@ -9809,28 +9685,11 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Configuring the Drill Shell", 
-                    "next_url": "/docs/configuring-the-drill-shell/", 
-                    "parent": "Configure Drill", 
-                    "previous_title": "Ports Used by Drill", 
-                    "previous_url": "/docs/ports-used-by-drill/", 
-                    "relative_path": "_docs/configure-drill/110-partition-pruning.md", 
-                    "title": "Partition Pruning", 
-                    "url": "/docs/partition-pruning/"
-                }, 
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Configure Drill", 
-                            "url": "/docs/configure-drill/"
-                        }
-                    ], 
-                    "children": [], 
                     "next_title": "Connect a Data Source", 
                     "next_url": "/docs/connect-a-data-source/", 
                     "parent": "Configure Drill", 
-                    "previous_title": "Partition Pruning", 
-                    "previous_url": "/docs/partition-pruning/", 
+                    "previous_title": "Ports Used by Drill", 
+                    "previous_url": "/docs/ports-used-by-drill/", 
                     "relative_path": "_docs/configure-drill/120-configuring-the-drill-shell.md", 
                     "title": "Configuring the Drill Shell", 
                     "url": "/docs/configuring-the-drill-shell/"
@@ -10874,8 +10733,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Query Audit Logging", 
-                    "next_url": "/docs/query-audit-logging/", 
+                    "next_title": "Performance Tuning", 
+                    "next_url": "/docs/performance-tuning/", 
                     "parent": "Query Data", 
                     "previous_title": "Querying System Tables", 
                     "previous_url": "/docs/querying-system-tables/", 
@@ -10899,6 +10758,36 @@
                 {
                     "breadcrumbs": [
                         {
+                            "title": "Performance Tuning", 
+                            "url": "/docs/performance-tuning/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Query Audit Logging", 
+                    "next_url": "/docs/query-audit-logging/", 
+                    "parent": "Performance Tuning", 
+                    "previous_title": "Performance Tuning", 
+                    "previous_url": "/docs/performance-tuning/", 
+                    "relative_path": "_docs/performance-tuning/010-performance-tuning-introduction.md", 
+                    "title": "Performance Tuning Introduction", 
+                    "url": "/docs/performance-tuning-introduction/"
+                }
+            ], 
+            "next_title": "Performance Tuning Introduction", 
+            "next_url": "/docs/performance-tuning-introduction/", 
+            "parent": "", 
+            "previous_title": "Monitoring and Canceling Queries in the Drill Web UI", 
+            "previous_url": "/docs/monitoring-and-canceling-queries-in-the-drill-web-ui/", 
+            "relative_path": "_docs/072-performance-tuning.md", 
+            "title": "Performance Tuning", 
+            "url": "/docs/performance-tuning/"
+        }, 
+        {
+            "breadcrumbs": [], 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
                             "title": "Query Audit Logging", 
                             "url": "/docs/query-audit-logging/"
                         }
@@ -10917,8 +10806,8 @@
             "next_title": "Getting Query Information", 
             "next_url": "/docs/getting-query-information/", 
             "parent": "", 
-            "previous_title": "Monitoring and Canceling Queries in the Drill Web UI", 
-            "previous_url": "/docs/monitoring-and-canceling-queries-in-the-drill-web-ui/", 
+            "previous_title": "Performance Tuning Introduction", 
+            "previous_url": "/docs/performance-tuning-introduction/", 
             "relative_path": "_docs/074-query-audit-logging.md", 
             "title": "Query Audit Logging", 
             "url": "/docs/query-audit-logging/"
@@ -12426,14 +12315,31 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Progress Reports", 
-                    "next_url": "/docs/progress-reports/", 
+                    "next_title": "Partition Pruning", 
+                    "next_url": "/docs/partition-pruning/", 
                     "parent": "Archived Pages", 
                     "previous_title": "How to Run the Drill Demo", 
                     "previous_url": "/docs/how-to-run-the-drill-demo/", 
                     "relative_path": "_docs/archived-pages/020-what-is-apache-drill.md", 
                     "title": "What is Apache Drill", 
                     "url": "/docs/what-is-apache-drill/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Archived Pages", 
+                            "url": "/docs/archived-pages/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Progress Reports", 
+                    "next_url": "/docs/progress-reports/", 
+                    "parent": "Archived Pages", 
+                    "previous_title": "What is Apache Drill", 
+                    "previous_url": "/docs/what-is-apache-drill/", 
+                    "relative_path": "_docs/archived-pages/030-partition-pruning.md", 
+                    "title": "Partition Pruning", 
+                    "url": "/docs/partition-pruning/"
                 }
             ], 
             "next_title": "How to Run the Drill Demo", 
@@ -12469,8 +12375,8 @@
             "next_title": "2014 Q1 Drill Report", 
             "next_url": "/docs/2014-q1-drill-report/", 
             "parent": "", 
-            "previous_title": "What is Apache Drill", 
-            "previous_url": "/docs/what-is-apache-drill/", 
+            "previous_title": "Partition Pruning", 
+            "previous_url": "/docs/partition-pruning/", 
             "relative_path": "_docs/160-progress-reports.md", 
             "title": "Progress Reports", 
             "url": "/docs/progress-reports/"

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/072-performance-tuning.md
----------------------------------------------------------------------
diff --git a/_docs/072-performance-tuning.md b/_docs/072-performance-tuning.md
new file mode 100644
index 0000000..10e346b
--- /dev/null
+++ b/_docs/072-performance-tuning.md
@@ -0,0 +1,5 @@
+---
+title: "Performance Tuning"
+---
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/075-getting-query-information.md
----------------------------------------------------------------------
diff --git a/_docs/075-getting-query-information.md b/_docs/075-getting-query-information.md
index 51c7fe4..58e0a62 100644
--- a/_docs/075-getting-query-information.md
+++ b/_docs/075-getting-query-information.md
@@ -4,7 +4,10 @@ parent: "Query Audit Logging"
 ---
 The query log provides audit log functionality for the queries executed by various drillbits in the cluster. To access the query log, go to `sqlline_queries.json` file in the `log` directory of the Drill installation. The log records important information about queries executed on the Drillbit where Drill runs. The log includes query text, start time, end time, user, status, schema, and the query id.
 
-You can query the `sqlline_queries.json` using Drill to get audit logging information.
+You can query one of the following files, depending on whether you run Drill in embedded or distributed mode, to get audit logging information:
+
+* `sqlline_queries.json` (embedded mode) 
+* `drillbit_queries.json` (distributed mode)
 
 ## Checking the Most Recent Queries
 

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/architecture/010-architecture-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/010-architecture-introduction.md b/_docs/architecture/010-architecture-introduction.md
index 45fad19..e80af26 100644
--- a/_docs/architecture/010-architecture-introduction.md
+++ b/_docs/architecture/010-architecture-introduction.md
@@ -48,8 +48,51 @@ The flow of a Drill query typically involves the following steps:
 
 You can access Drill through the following interfaces:
 
-  * [Drill shell]({{ site.baseurl }}/docs/install-drill)
+  * [Drill shell]({{ site.baseurl }}/docs/configuring-the-drill-shell/)
   * [Drill Web UI]({{ site.baseurl }}/docs/monitoring-and-canceling-queries-in-the-drill-web-ui)
   * [ODBC/JDBC]({{ site.baseurl }}/docs/odbc-jdbc-interfaces/#using-odbc-to-access-apache-drill-from-bi-tools) 
   * C++ API
 
+### **_Dynamic schema discovery_**
+
+Drill does not require schema or type specification for data in order to start
+the query execution process. Drill starts data processing in record-batches
+and discovers the schema during processing. Self-describing data formats such
+as Parquet, JSON, AVRO, and NoSQL databases have schema specified as part of
+the data itself, which Drill leverages dynamically at query time. Because
+schema can change over the course of a Drill query, all Drill operators are
+designed to reconfigure themselves when schemas change.
+
+### **_Flexible data model_**
+
+Drill allows access to nested data attributes, just like SQL columns, and
+provides intuitive extensions to easily operate on them. From an architectural
+point of view, Drill provides a flexible hierarchical columnar data model that
+can represent complex, highly dynamic and evolving data models. Drill allows
+for efficient processing of these models without the need to flatten or
+materialize them at design time or at execution time. Relational data in Drill
+is treated as a special or simplified case of complex/multi-structured data.
+
+### **_De-centralized metadata_**
+
+Drill does not have a centralized metadata requirement. You do not need to
+create and manage tables and views in a metadata repository, or rely on a
+database administrator group for such a function. Drill metadata is derived
+from the storage plugins that correspond to data sources. Storage plugins
+provide a spectrum of metadata ranging from full metadata (Hive), partial
+metadata (HBase), or no central metadata (files). De-centralized metadata
+means that Drill is NOT tied to a single Hive repository. You can query
+multiple Hive repositories at once and then combine the data with information
+from HBase tables or with a file in a distributed file system. You can also
+use SQL DDL syntax to create metadata within Drill, which gets organized just
+like a traditional database. Drill metadata is accessible through the ANSI
+standard INFORMATION_SCHEMA database.
+
+### **_Extensibility_**
+
+Drill provides an extensible architecture at all layers, including the storage
+plugin, query, query optimization/execution, and client API layers. You can
+customize any layer for the specific needs of an organization or you can
+extend the layer to a broader array of use cases. Drill provides a built in
+classpath scanning and plugin concept to add additional storage plugins,
+functions, and operators with minimal configuration.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/architecture/030-architectural-highlights.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/030-architectural-highlights.md b/_docs/architecture/030-architectural-highlights.md
deleted file mode 100644
index b0e54af..0000000
--- a/_docs/architecture/030-architectural-highlights.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "Architectural Highlights"
-parent: "Architecture"
----
-

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/architecture/030-performance.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/030-performance.md b/_docs/architecture/030-performance.md
new file mode 100644
index 0000000..419e538
--- /dev/null
+++ b/_docs/architecture/030-performance.md
@@ -0,0 +1,55 @@
+---
+title: "Performance"
+parent: "Architecture"
+---
+Drill is designed from the ground up for high performance on large datasets.
+The following core elements of Drill processing are responsible for Drill's
+performance:
+
+**_Distributed engine_**
+
+Drill provides a powerful distributed execution engine for processing queries.
+Users can submit requests to any node in the cluster. You can simply add new
+nodes to the cluster to scale for larger volumes of data, support more users
+or to improve performance.
+
+**_Columnar execution_**
+
+Drill optimizes for both columnar storage and execution by using an in-memory
+data model that is hierarchical and columnar. When working with data stored in
+columnar formats such as Parquet, Drill avoids disk access for columns that
+are not involved in an analytic query. Drill also provides an execution layer
+that performs SQL processing directly on columnar data without row
+materialization. The combination of optimizations for columnar storage and
+direct columnar execution significantly lowers memory footprints and provides
+faster execution of BI/Analytic type of workloads.
+
+**_Vectorization_**
+
+Rather than operating on single values from a single table record at one time,
+vectorization in Drill allows the CPU to operate on vectors, referred to as
+Record Batches. Record Batches are arrays of values from many different
+records. The technical basis for efficiency of vectorized processing is modern
+chip technology with deep-pipelined CPU designs. Keeping all pipelines full to
+achieve efficiency near peak performance is something impossible to achieve in
+traditional database engines, primarily due to code complexity.
+
+**_Runtime compilation_**
+
+Runtime compilation is faster compared to the interpreted execution. Drill
+generates highly efficient custom code for every single query for every single
+operator. Here is a quick overview of the Drill compilation/code generation
+process at a glance.
+
+![drill compiler]({{ site.baseurl }}/docs/img/58.png)
+
+**_Optimistic and pipelined query execution_**
+
+Drill adopts an optimistic execution model to process queries. Drill assumes
+that failures are infrequent within the short span of a query and therefore
+does not spend time creating boundaries or checkpoints to minimize recovery
+time. Failures at node level are handled gracefully. In the instance of a
+single query failure, the query is rerun. Drill execution uses a pipeline
+model where all tasks are scheduled at once. The query execution happens
+in-memory as much as possible to move data through task pipelines, persisting to
+disk only if there is memory overflow.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/architecture/architectural-highlights/010-flexibility.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/architectural-highlights/010-flexibility.md b/_docs/architecture/architectural-highlights/010-flexibility.md
deleted file mode 100644
index a04d4ea..0000000
--- a/_docs/architecture/architectural-highlights/010-flexibility.md
+++ /dev/null
@@ -1,84 +0,0 @@
----
-title: "Flexibility"
-parent: "Architectural Highlights"
----
-The Drill architecture brings the SQL Ecosystem and *Performance* of
-the relational systems to Hadoop scale data *without* compromising 
-the *flexibility* of Hadoop/NoSQL systems. There are several core
-architectural elements in Apache Drill that make it a highly flexible and
-efficient query engine.
-
-The following features contribute to Drill's flexible architecture:
-
-**_Dynamic schema discovery_**
-
-Drill does not require schema or type specification for the data in order to
-start the query execution process. Instead, Drill starts processing the data
-in units called record-batches and discovers the schema on the fly during
-processing. Self-describing data formats such as Parquet, JSON, Avro, and
-NoSQL databases have schema specified as part of the data itself, which Drill
-leverages dynamically at query time. Schema can change over the course of a
-Drill query, so all of the Drill operators are designed to reconfigure
-themselves when such schema changing events occur.
-
-**_Flexible data model_**
-
-Drill is purpose-built from the ground up for complex/multi-structured data
-commonly seen in Hadoop/NoSQL applications such as social/mobile, clickstream,
-logs, and sensor equipped IOT. From a user point of view, Drill allows access
-to nested data attributes, just like SQL columns, and provides intuitive
-extensions to easily operate on them. From an architectural point of view,
-Drill provides a flexible hierarchical columnar data model that can represent
-complex, highly dynamic and evolving data models, and allows for efficient
-processing of it without the need to flatten or materialize it at design time
-or at execution time. Relational data in Drill is treated as a special or
-simplified case of complex/multi-structured data.
-
-**_De-centralized metadata_**
-
-Unlike other SQL-on-Hadoop technologies or any traditional relational
-database, Drill does not have a centralized metadata requirement. In order to
-query data through Drill, users do not need to create and manage tables and
-views in a metadata repository, or rely on a database administrator group for
-such a function.
-
-Drill metadata is derived from the storage plugins that correspond to data
-sources. Drill supports a varied set of storage plugins that provide a
-spectrum of metadata ranging from full metadata such as for Hive, partial
-metadata such as for HBase, or no central metadata such as for files.
-
-De-centralized metadata also means that Drill is NOT tied to a single Hive
-repository. Users can query multiple Hive repositories at once and then
-combine the data with information from HBase tables or with a file in a
-distributed file system.
-
-Users also have the ability to create metadata (tables/views/databases) within
-Drill using the SQL DDL syntax. De-centralized metadata is applicable during
-metadata creation. Drill allows persisting metadata in one of the underlying
-data sources.
-
-From a client access perspective, Drill metadata is organized just like a
-traditional DB (Databases->Tables/Views->Columns). The metadata is accessible
-through the ANSI standard INFORMATION_SCHEMA database
-
-For more information on how to configure and work various data sources with
-Drill, refer to [Connect Apache Drill to Data Sources]({{ site.baseurl }}/docs/connect-a-data-source-introduction).
-
-**_Extensibility_**
-
-Drill provides an extensible architecture at all layers, including the storage
-plugin, query, query optimization/execution, and client API layers. You can
-customize any layer for the specific needs of an organization or you can
-extend the layer to a broader array of use cases.
-
-Drill provides a built in classpath scanning and plugin concept to add
-additional storage plugins, functions, and operators with minimal
-configuration.
-
-The following list provides a few examples of Drill’s extensible architectural
-capabilities:
-
-* A high performance Java API to implement custom UDFs/UDAFs
-* Ability to go beyond Hadoop by implementing custom storage plugins to other data sources such as Oracle/MySQL or NoSQL stores, such as Mongo or Cassandra
-* An API to implement custom operators
-* Support for direct execution of strongly specified JSON based logical and physical plans to help with the simplification of testing, and to enable integration of alternative query languages other than SQL.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/architecture/architectural-highlights/020-performance.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/architectural-highlights/020-performance.md b/_docs/architecture/architectural-highlights/020-performance.md
deleted file mode 100644
index c6271e0..0000000
--- a/_docs/architecture/architectural-highlights/020-performance.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: "Performance"
-parent: "Architectural Highlights"
----
-Drill is designed from the ground up for high performance on large datasets.
-The following core elements of Drill processing are responsible for Drill's
-performance:
-
-**_Distributed engine_**
-
-Drill provides a powerful distributed execution engine for processing queries.
-Users can submit requests to any node in the cluster. You can simply add new
-nodes to the cluster to scale for larger volumes of data, support more users
-or to improve performance.
-
-**_Columnar execution_**
-
-Drill optimizes for both columnar storage and execution by using an in-memory
-data model that is hierarchical and columnar. When working with data stored in
-columnar formats such as Parquet, Drill avoids disk access for columns that
-are not involved in an analytic query. Drill also provides an execution layer
-that performs SQL processing directly on columnar data without row
-materialization. The combination of optimizations for columnar storage and
-direct columnar execution significantly lowers memory footprints and provides
-faster execution of BI/Analytic type of workloads.
-
-**_Vectorization_**
-
-Rather than operating on single values from a single table record at one time,
-vectorization in Drill allows the CPU to operate on vectors, referred to as a
-Record Batches. Record Batches are arrays of values from many different
-records. The technical basis for efficiency of vectorized processing is modern
-chip technology with deep-pipelined CPU designs. Keeping all pipelines full to
-achieve efficiency near peak performance is something impossible to achieve in
-traditional database engines, primarily due to code complexity.
-
-**_Runtime compilation_**
-
-Runtime compilation is faster compared to the interpreted execution. Drill
-generates highly efficient custom code for every single query for every single
-operator. Here is a quick overview of the Drill compilation/code generation
-process at a glance.
-
-![drill compiler]({{ site.baseurl }}/docs/img/58.png)
-
-**Optimistic and pipelined query execution**
-
-Drill adopts an optimistic execution model to process queries. Drill assumes
-that failures are infrequent within the short span of a query and therefore
-does not spend time creating boundaries or checkpoints to minimize recovery
-time. Failures at node level are handled gracefully. In the instance of a
-single query failure, the query is rerun. Drill execution uses a pipeline
-model where all tasks are scheduled at once. The query execution happens in-
-memory as much as possible to move data through task pipelines, persisting to
-disk only if there is memory overflow.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/archived-pages/030-partition-pruning.md
----------------------------------------------------------------------
diff --git a/_docs/archived-pages/030-partition-pruning.md b/_docs/archived-pages/030-partition-pruning.md
new file mode 100644
index 0000000..3dc79ff
--- /dev/null
+++ b/_docs/archived-pages/030-partition-pruning.md
@@ -0,0 +1,75 @@
+---
+title: "Partition Pruning"
+parent: "Archived Pages"
+---
+Partition pruning is a performance optimization that limits the number of
+files and partitions that Drill reads when querying file systems and Hive
+tables. Drill only reads a subset of the files that reside in a file system or
+a subset of the partitions in a Hive table when a query matches certain filter
+criteria.
+
+For Drill to apply partition pruning to Hive tables, you must have created the
+tables in Hive using the `PARTITION BY` clause:
+
+`CREATE TABLE <table_name> (<column_name>) PARTITION BY (<column_name>);`
+
+When you create Hive tables using the `PARTITION BY` clause, each partition of
+data is automatically split out into different directories as data is written
+to disk. For more information about Hive partitioning, refer to the [Apache
+Hive wiki](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL/#LanguageManualDDL-PartitionedTables).
+
+Typically, table data in a file system is organized by directories and
+subdirectories. Queries on table data may contain `WHERE` clause filters on
+specific directories.
+
+Drill’s query planner evaluates the filters as part of a Filter operator. If
+no partition filters are present, the underlying Scan operator reads all files
+in all directories and then sends the data to operators downstream, such as
+Filter.
+
+When partition filters are present, the query planner determines if it can
+push the filters down to the Scan such that the Scan only reads the
+directories that match the partition filters, thus reducing disk I/O.
+
+## Partition Pruning Example
+
+The /`Users/max/data/logs` directory in a file system contains subdirectories
+that span a few years.
+
+The following image shows the hierarchical structure of the `…/logs` directory
+and (sub) directories:
+
+![drill query flow]({{ site.baseurl }}/docs/img/54.png)
+
+The following query requests log file data for 2013 from the `…/logs`
+directory in the file system:
+
+    SELECT * FROM dfs.`/Users/max/data/logs` WHERE cust_id < 10 and dir0 = 2013 limit 2;
+
+If you run the `EXPLAIN PLAN` command for the query, you can see that the`
+…/logs` directory is filtered by the scan operator.
+
+    EXPLAIN PLAN FOR SELECT * FROM dfs.`/Users/max/data/logs` WHERE cust_id < 10 and dir0 = 2013 limit 2;
+
+The following image shows a portion of the physical plan when partition
+pruning is applied:
+
+![drill query flow]({{ site.baseurl }}/docs/img/21.png)
+
+## Filter Examples
+
+The following queries include examples of the types of filters eligible for
+partition pruning optimization:
+
+**Example 1: Partition filters ANDed together**
+
+    SELECT * FROM dfs.`/Users/max/data/logs` WHERE dir0 = '2014' AND dir1 = '1'
+
+**Example 2: Partition filter ANDed with regular column filter**
+
+    SELECT * FROM dfs.`/Users/max/data/logs` WHERE cust_id < 10 AND dir0 = 2013 limit 2;
+
+**Example 3: Combination of AND, OR involving partition filters**
+
+    SELECT * FROM dfs.`/Users/max/data/logs` WHERE (dir0 = '2013' AND dir1 = '1') OR (dir0 = '2014' AND dir1 = '2')
+

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/configure-drill/050-configuring-multitenant-resources.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/050-configuring-multitenant-resources.md b/_docs/configure-drill/050-configuring-multitenant-resources.md
index c54f430..5771071 100644
--- a/_docs/configure-drill/050-configuring-multitenant-resources.md
+++ b/_docs/configure-drill/050-configuring-multitenant-resources.md
@@ -37,4 +37,4 @@ Currently, you do not manage CPU resources within Drill. [Use Linux `cgroups`](h
 
 ## How to Manage Disk Resources
 
-The `planner.add_producer_consumer` system option enables or disables a secondary reading thread that works out of band of the rest of the scanning fragment to prefetch data from disk. If you interact with a certain type of storage medium that is slow or does not prefetch much data, this option tells Drill to add a producer consumer reading thread to the operation. Drill can then assign one thread that focuses on a single reading fragment. If Drill is using memory, you can disable this option to get better performance. If Drill is using disk space, you should enable this option and set a reasonable queue size for the `planner.producer_consumer_queue_size` option. For more information about these options, see the section,  "Performance Tuning".
\ No newline at end of file
+The `planner.add_producer_consumer` system option enables or disables a secondary reading thread that works out of band of the rest of the scanning fragment to prefetch data from disk. If you interact with a certain type of storage medium that is slow or does not prefetch much data, this option tells Drill to add a producer consumer reading thread to the operation. Drill can then assign one thread that focuses on a single reading fragment. If Drill is using memory, you can disable this option to get better performance. If Drill is using disk space, you should enable this option and set a reasonable queue size for the `planner.producer_consumer_queue_size` option. For more information about these options, see the section, ["Performance Tuning"](/docs/performance-tuning-introduction/).
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/configure-drill/060-configuring-a-shared-drillbit.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/060-configuring-a-shared-drillbit.md b/_docs/configure-drill/060-configuring-a-shared-drillbit.md
index 4e47213..1070586 100644
--- a/_docs/configure-drill/060-configuring-a-shared-drillbit.md
+++ b/_docs/configure-drill/060-configuring-a-shared-drillbit.md
@@ -11,7 +11,7 @@ Set [options in sys.options]({{site.baseurl}}/docs/configuration-options-introdu
 * exec.queue.large  
 * exec.queue.small  
 
-For more information, see the section, "Performance Tuning".
+For more information, see the section, ["Performance Tuning"](/docs/performance-tuning-introduction/).
 
 ## Configuring Parallelization
 

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/configure-drill/110-partition-pruning.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/110-partition-pruning.md b/_docs/configure-drill/110-partition-pruning.md
deleted file mode 100644
index 09dc626..0000000
--- a/_docs/configure-drill/110-partition-pruning.md
+++ /dev/null
@@ -1,75 +0,0 @@
----
-title: "Partition Pruning"
-parent: "Configure Drill"
----
-Partition pruning is a performance optimization that limits the number of
-files and partitions that Drill reads when querying file systems and Hive
-tables. Drill only reads a subset of the files that reside in a file system or
-a subset of the partitions in a Hive table when a query matches certain filter
-criteria.
-
-For Drill to apply partition pruning to Hive tables, you must have created the
-tables in Hive using the `PARTITION BY` clause:
-
-`CREATE TABLE <table_name> (<column_name>) PARTITION BY (<column_name>);`
-
-When you create Hive tables using the `PARTITION BY` clause, each partition of
-data is automatically split out into different directories as data is written
-to disk. For more information about Hive partitioning, refer to the [Apache
-Hive wiki](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL/#LanguageManualDDL-PartitionedTables).
-
-Typically, table data in a file system is organized by directories and
-subdirectories. Queries on table data may contain `WHERE` clause filters on
-specific directories.
-
-Drill’s query planner evaluates the filters as part of a Filter operator. If
-no partition filters are present, the underlying Scan operator reads all files
-in all directories and then sends the data to operators downstream, such as
-Filter.
-
-When partition filters are present, the query planner determines if it can
-push the filters down to the Scan such that the Scan only reads the
-directories that match the partition filters, thus reducing disk I/O.
-
-## Partition Pruning Example
-
-The /`Users/max/data/logs` directory in a file system contains subdirectories
-that span a few years.
-
-The following image shows the hierarchical structure of the `…/logs` directory
-and (sub) directories:
-
-![drill query flow]({{ site.baseurl }}/docs/img/54.png)
-
-The following query requests log file data for 2013 from the `…/logs`
-directory in the file system:
-
-    SELECT * FROM dfs.`/Users/max/data/logs` WHERE cust_id < 10 and dir0 = 2013 limit 2;
-
-If you run the `EXPLAIN PLAN` command for the query, you can see that the`
-…/logs` directory is filtered by the scan operator.
-
-    EXPLAIN PLAN FOR SELECT * FROM dfs.`/Users/max/data/logs` WHERE cust_id < 10 and dir0 = 2013 limit 2;
-
-The following image shows a portion of the physical plan when partition
-pruning is applied:
-
-![drill query flow]({{ site.baseurl }}/docs/img/21.png)
-
-## Filter Examples
-
-The following queries include examples of the types of filters eligible for
-partition pruning optimization:
-
-**Example 1: Partition filters ANDed together**
-
-    SELECT * FROM dfs.`/Users/max/data/logs` WHERE dir0 = '2014' AND dir1 = '1'
-
-**Example 2: Partition filter ANDed with regular column filter**
-
-    SELECT * FROM dfs.`/Users/max/data/logs` WHERE cust_id < 10 AND dir0 = 2013 limit 2;
-
-**Example 3: Combination of AND, OR involving partition filters**
-
-    SELECT * FROM dfs.`/Users/max/data/logs` WHERE (dir0 = '2013' AND dir1 = '1') OR (dir0 = '2014' AND dir1 = '2')
-

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/configure-drill/120-configuring-the-drill-shell.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/120-configuring-the-drill-shell.md b/_docs/configure-drill/120-configuring-the-drill-shell.md
index d7c474a..383bc1b 100644
--- a/_docs/configure-drill/120-configuring-the-drill-shell.md
+++ b/_docs/configure-drill/120-configuring-the-drill-shell.md
@@ -11,26 +11,26 @@ Formatting tables takes time, which you might notice if running a huge query usi
 
 The following table lists the commands that you can run on the Drill command line.
 
-| Command       | Description                                                                                             |
-|---------------|---------------------------------------------------------------------------------------------------------|
-| !brief        | Set verbose mode off.                                                                                   |
-| !close        | Close the current connection to the database.                                                           |
-| !closeall     | Close all current open connections.                                                                     |
-| !connect      | Open a new connection to the database.                                                                  |
-| !help         | Print a summary of command usage.                                                                       |
-| !history      | Display the command history.                                                                            |
-| !list         | List the current connections.                                                                           |
-| !outputformat | Set the output format for displaying results.                                                           |
-| !properties   | Connect to the database specified in the properties file(s).                                            |
-| !quit         | Exits the Drill shell.                                                                                  |
-| !reconnect    | Reconnect to the database.                                                                              |
-| !record       | Record all output to the specified file.                                                                |
-| !run          | Run a script from the specified file.                                                                   |
-| !save         | Save the current variables and aliases.                                                                 |
-| !script       | Start saving a script to a file.                                                                        |
-| !set          | Set a [sqlline variable]({{site.baseurl}}/docs/configuring-the-drill-shell/#the-set-command-variables). |
-| !tables       | List all the tables in the database.                                                                    |
-| !verbose      | Show unabbreviated error messages.                                                                      |
+| Command       | Description                                                                                                                           |
+|---------------|---------------------------------------------------------------------------------------------------------------------------------------|
+| !brief        | Set verbose mode off.                                                                                                                 |
+| !close        | Close the current connection to the database.                                                                                         |
+| !closeall     | Close all current open connections.                                                                                                   |
+| !connect      | Open a new connection to the database.                                                                                                |
+| !help         | Print a summary of command usage.                                                                                                     |
+| !history      | Display the command history.                                                                                                          |
+| !list         | List the current connections.                                                                                                         |
+| !outputformat | Set the output format for displaying results.                                                                                         |
+| !properties   | Connect to the database specified in the properties file(s).                                                                          |
+| !quit         | Exits the Drill shell.                                                                                                                |
+| !reconnect    | Reconnect to the database.                                                                                                            |
+| !record       | Record all output to the specified file.                                                                                              |
+| !run          | Run a script from the specified file.                                                                                                 |
+| !save         | Save the current variables and aliases.                                                                                               |
+| !script       | Start saving a script to a file.                                                                                                      |
+| !set          | Set the given variable. See [The Set Command Variables]({{site.baseurl}}/docs/configuring-the-drill-shell#the-set-command-variables). |
+| !tables       | List all the tables in the database.                                                                                                  |
+| !verbose      | Show unabbreviated error messages.                                                                                                    |
 
 ## Examples of Configuring the Drill Shell
 
@@ -54,29 +54,29 @@ Set the output format to CSV to improve performance of a huge query.
 
 ## The Set Command Variables
 
-| Variable Name   | Valid Variable Values  | Description                                                                                                                                                          |
-|-----------------|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| autoCommit      | true/false             | Enable/disable automatic transaction commit.                                                                                                                         |
-| autoSave        | true/false             | Automatically save preferences.                                                                                                                                      |
-| color           | true/false             | Control whether color is used for display.                                                                                                                           |
-| fastConnect     | true/false             | Skip building table/column list for tab-completion.                                                                                                                  |
-| force           | true/false             | Continue running script even after errors.                                                                                                                           |
-| headerInterval  | \<integer\>            | The interval between which headers are displayed.                                                                                                                    |
-| historyFile     | \<path\>               | File in which to save command history. Default is $HOME/.sqlline/history (UNIX, Linux, Mac OS), $HOME/sqlline/history (Windows).                                     |
-| incremental     | true/false             | Do not receive all rows from server before printing the first row. Uses fewer resources, especially for long-running queries, but column widths may be incorrect.    |
-| isolation       | \<level\>              | Set transaction isolation level.                                                                                                                                     |
-| maxColumnWidth  | \<integer\>            | Maximum width for displaying columns.                                                                                                                                |
-| maxHeight       | \<integer\>            | Maximum height of the terminal.                                                                                                                                      |
-| maxWidth        | \<integer\>            | Maximum width of the terminal.                                                                                                                                       |
-| numberFormat    | \<pattern\>            | Format numbers using DecimalFormat pattern.                                                                                                                          |
-| outputFormat    | table/vertical/csv/tsv | Format mode for result display.                                                                                                                                      |
-| properties      | \<path\>               | File from which SqlLine reads properties on startup. Default is $HOME/.sqlline/sqlline.properties (UNIX, Linux, Mac OS), $HOME/sqlline/sqlline.properties (Windows). |
-| rowLimit        | \<integer\>            | Maximum number of rows returned from a query; zero means no limit.                                                                                                   |
-| showElapsedTime | true/false             | Display execution time when verbose.                                                                                                                                 |
-| showHeader      | true/false             | Show column names in query results.                                                                                                                                  |
-| showNestedErrs  | true/false             | Display nested errors.                                                                                                                                               |
-| showWarnings    | true/false             | Display connection warnings.                                                                                                                                         |
-| silent          | true/false             | Disable or enable showing information specified by show commands.                                                                                                    |
-| timeout         | \<integer\>            | Query timeout in seconds; less than zero means no timeout.                                                                                                           |
-| trimScripts     | true/false             | Remove trailing spaces from lines read from script files.                                                                                                            |
-| verbose         | true/false             | Show unabbreviated error messages and debug info.                                                                                                                    |
\ No newline at end of file
+| Variable Name   | Valid Variable Values  | Description                                                                                                                                                            |
+|-----------------|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| autoCommit      | true/false             | Enable/disable automatic transaction commit.                                                                                                                           |
+| autoSave        | true/false             | Automatically save preferences.                                                                                                                                        |
+| color           | true/false             | Control whether color is used for display.                                                                                                                             |
+| fastConnect     | true/false             | Skip building table/column list for tab-completion.                                                                                                                    |
+| force           | true/false             | Continue running script even after errors.                                                                                                                             |
+| headerInterval  | \<integer\>            | The interval between which headers are displayed.                                                                                                                      |
+| historyFile     | \<path\>               | File in which to save command history. Default is $HOME/.sqlline/history (UNIX, Linux, Mac OS), $HOME/sqlline/history (Windows).                                       |
+| incremental     | true/false             | Do not receive all rows from server before printing the first row. Uses fewer resources, especially for long-running queries, but column widths may be incorrect.      |
+| isolation       | \<level\>              | Set transaction isolation level.                                                                                                                                       |
+| maxColumnWidth  | \<integer\>            | Maximum width for displaying columns.                                                                                                                                  |
+| maxHeight       | \<integer\>            | Maximum height of the terminal.                                                                                                                                        |
+| maxWidth        | \<integer\>            | Maximum width of the terminal.                                                                                                                                         |
+| numberFormat    | \<pattern\>            | Format numbers using DecimalFormat pattern.                                                                                                                            |
+| outputFormat    | table/vertical/csv/tsv | Format mode for result display.                                                                                                                                        |
+| properties      | \<path\>               | File from which the shell reads properties on startup. Default is $HOME/.sqlline/sqlline.properties (UNIX, Linux, Mac OS), $HOME/sqlline/sqlline.properties (Windows). |
+| rowLimit        | \<integer\>            | Maximum number of rows returned from a query; zero means no limit.                                                                                                     |
+| showElapsedTime | true/false             | Display execution time when verbose.                                                                                                                                   |
+| showHeader      | true/false             | Show column names in query results.                                                                                                                                    |
+| showNestedErrs  | true/false             | Display nested errors.                                                                                                                                                 |
+| showWarnings    | true/false             | Display connection warnings.                                                                                                                                           |
+| silent          | true/false             | Disable or enable showing information specified by show commands.                                                                                                      |
+| timeout         | \<integer\>            | Query timeout in seconds; less than zero means no timeout.                                                                                                             |
+| trimScripts     | true/false             | Remove trailing spaces from lines read from script files.                                                                                                              |
+| verbose         | true/false             | Show unabbreviated error messages and debug info.                                                                                                                      |
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md b/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
index 75976c1..ff1c736 100644
--- a/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
+++ b/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
@@ -32,9 +32,7 @@ UI to connect to Drill. Drill must be running in order to access the Web UI.
 
 Complete the following steps to configure MongoDB as a data source for Drill:
 
-  1. Navigate to Drill installation directory and invoke SQLLine to run Drill queries. On Linux, for example:
-
-        bin/sqlline -u jdbc:drill:zk=local
+  1. [Start the Drill]({{site.baseurl}}/docs/starting-drill-on-linux-and-mac-os-x/) shell for your environment.
 
      Do not enter any commands. You will return to the command prompt after
 completing the configuration in the Drill Web UI.
@@ -62,17 +60,17 @@ Drill data sources, including MongoDB. If you downloaded the zip codes file,
 you should see `mongo.zipdb` in the results.
 
     0: jdbc:drill:zk=local> SHOW DATABASES;
-    +-------------+
-    | SCHEMA_NAME |
-    +-------------+
-    | dfs.default |
-    | dfs.root    |
-    | dfs.tmp     |
-    | sys         |
-    | mongo.zipdb |
-    | cp.default  |
+    +--------------------+
+    |     SCHEMA_NAME    |
+    +--------------------+
+    | dfs.default        |
+    | dfs.root           |
+    | dfs.tmp            |
+    | sys                |
+    | mongo.zipdb        |
+    | cp.default         |
     | INFORMATION_SCHEMA |
-    +-------------+
+    +--------------------+
 
 If you want all queries that you submit to run on `mongo.zipdb`, you can issue
 the `USE` command to change schema.
@@ -87,19 +85,19 @@ Reference]({{ site.baseurl }}/docs/sql-reference).
 **Example 1: View mongo.zipdb Dataset**
 
     0: jdbc:drill:zk=local> SELECT * FROM zipcodes LIMIT 10;
-    +------------+
-    |     *      |
-    +------------+
-    | { "city" : "AGAWAM" , "loc" : [ -72.622739 , 42.070206] , "pop" : 15338 , "state" : "MA"} |
-    | { "city" : "CUSHMAN" , "loc" : [ -72.51565 , 42.377017] , "pop" : 36963 , "state" : "MA"} |
-    | { "city" : "BARRE" , "loc" : [ -72.108354 , 42.409698] , "pop" : 4546 , "state" : "MA"} |
-    | { "city" : "BELCHERTOWN" , "loc" : [ -72.410953 , 42.275103] , "pop" : 10579 , "state" : "MA"} |
-    | { "city" : "BLANDFORD" , "loc" : [ -72.936114 , 42.182949] , "pop" : 1240 , "state" : "MA"} |
-    | { "city" : "BRIMFIELD" , "loc" : [ -72.188455 , 42.116543] , "pop" : 3706 , "state" : "MA"} |
-    | { "city" : "CHESTER" , "loc" : [ -72.988761 , 42.279421] , "pop" : 1688 , "state" : "MA"} |
-    | { "city" : "CHESTERFIELD" , "loc" : [ -72.833309 , 42.38167] , "pop" : 177 , "state" : "MA"} |
-    | { "city" : "CHICOPEE" , "loc" : [ -72.607962 , 42.162046] , "pop" : 23396 , "state" : "MA"} |
-    | { "city" : "CHICOPEE" , "loc" : [ -72.576142 , 42.176443] , "pop" : 31495 , "state" : "MA"} |
++------------------------------------------------------------------------------------------------+
+|                                           *                                                    |
++------------------------------------------------------------------------------------------------+
+| { "city" : "AGAWAM" , "loc" : [ -72.622739 , 42.070206] , "pop" : 15338 , "state" : "MA"}      |
+| { "city" : "CUSHMAN" , "loc" : [ -72.51565 , 42.377017] , "pop" : 36963 , "state" : "MA"}      |
+| { "city" : "BARRE" , "loc" : [ -72.108354 , 42.409698] , "pop" : 4546 , "state" : "MA"}        |
+| { "city" : "BELCHERTOWN" , "loc" : [ -72.410953 , 42.275103] , "pop" : 10579 , "state" : "MA"} |
+| { "city" : "BLANDFORD" , "loc" : [ -72.936114 , 42.182949] , "pop" : 1240 , "state" : "MA"}    |
+| { "city" : "BRIMFIELD" , "loc" : [ -72.188455 , 42.116543] , "pop" : 3706 , "state" : "MA"}    |
+| { "city" : "CHESTER" , "loc" : [ -72.988761 , 42.279421] , "pop" : 1688 , "state" : "MA"}      |
+| { "city" : "CHESTERFIELD" , "loc" : [ -72.833309 , 42.38167] , "pop" : 177 , "state" : "MA"}   |
+| { "city" : "CHICOPEE" , "loc" : [ -72.607962 , 42.162046] , "pop" : 23396 , "state" : "MA"}    |
+| { "city" : "CHICOPEE" , "loc" : [ -72.576142 , 42.176443] , "pop" : 31495 , "state" : "MA"}    |
 
 **Example 2: Aggregation**
 

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/getting-started/010-drill-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/getting-started/010-drill-introduction.md b/_docs/getting-started/010-drill-introduction.md
index 3645cfb..de01c2b 100644
--- a/_docs/getting-started/010-drill-introduction.md
+++ b/_docs/getting-started/010-drill-introduction.md
@@ -9,9 +9,11 @@ applications, while still providing the familiarity and ecosystem of ANSI SQL,
 the industry-standard query language. Drill provides plug-and-play integration
 with existing Apache Hive and Apache HBase deployments. 
 
+## What's New in Apache Drill 1.0
+
 Apache Drill 1.0 offers the following new features:
 
-* Many performance planning and execution improvements, including a new text reader for faster join planning that complies with RFC 4180.
+* Many performance planning and execution [improvements](/docs/performance-tuning-introduction/), including a new text reader for faster join planning that complies with RFC 4180.
 * Updated [Drill shell]({{site.baseurl}}/docs/configuring-the-drill-shell/#examples-of-configuring-the-drill-shell) and now formats query results having fewer than 70 characters in a column.
 * [Query audit logging]({{site.baseurl}}/docs/getting-query-information/) for getting the query history on a Drillbit.
 * Improved connection handling.
@@ -20,6 +22,8 @@ Apache Drill 1.0 offers the following new features:
 
 In this release, Drill disables the DECIMAL data type, including casting to DECIMAL and reading DECIMAL types from Parquet and Hive. To enable the DECIMAL type, set the `planner.enable_decimal_data_type` system option to `true`.
 
+## Apache Drill Key Features
+
 Key features of Apache Drill are:
 
   * Low-latency SQL queries
@@ -38,30 +42,3 @@ If you've never used Drill, visit these links to get a jump start:
 * [SQL Support]({{ site.baseurl }}/docs/sql-reference-introduction/)
 * [Drill Tutorials]({{ site.baseurl }}/docs/tutorials-introduction)
 
-## Videos
-
-<table>
-  <tr>
-    <th><a href="http://www.youtube.com/watch?feature=player_embedded&v=HITzj3ihSUk
-" target="_blank"><img src="http://img.youtube.com/vi/HITzj3ihSUk/0.jpg" 
-alt="intro to Drill" width="240" height="180" border="10" />Introduction to Apache Drill</a></th>
-    <th><a href="http://www.youtube.com/watch?feature=player_embedded&v=FkcegazNuio
-" target="_blank"><img src="http://img.youtube.com/vi/FkcegazNuio/0.jpg" 
-alt="Tableau and Drill" width="240" height="180" border="10" />Tableau + Drill</a</th>
-  </tr>
-  <tr>
-    <td><a href="http://www.youtube.com/watch?feature=player_embedded&v=kG6vzsk8T7E
-" target="_blank"><img src="http://img.youtube.com/vi/kG6vzsk8T7E/0.jpg" 
-alt="drill config options" width="240" height="180" border="10" />Drill Configuration Options</a></td>
-    <td><a href="http://www.youtube.com/watch?feature=player_embedded&v=XUIKlsX8yVM
-" target="_blank"><img src="http://img.youtube.com/vi/XUIKlsX8yVM/0.jpg" 
-alt="Self-service SQL Exploration on Mongo" width="240" height="180" border="10" />Self-service SQL Exploration on MongoDB</a></td>
-  </tr>
-  <tr>
-    <td><a href="http://www.youtube.com/watch?feature=player_embedded&v=uyN9DDCNP8o
-" target="_blank"><img src="http://img.youtube.com/vi/uyN9DDCNP8o/0.jpg" 
-alt="Microstrategy and Drill" width="240" height="180" border="10" />Microstrategy + Drill</a></td>
-    <td></td>
-  </tr>
-</table>
-


[04/31] drill git commit: Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages

Posted by ts...@apache.org.
Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/c240e01f
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/c240e01f
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/c240e01f

Branch: refs/heads/gh-pages
Commit: c240e01f0e2f80b7168e6cb8800412ec8f4741fb
Parents: 2c75f6d 1b7072c
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 11:24:36 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 11:24:36 2015 -0700

----------------------------------------------------------------------
 _config-prod.yml                                |   2 +-
 _config.yml                                     |   3 +
 .../075-configuring-user-authentication.md      |   2 +-
 _includes/head.html                             |  20 +-
 _layouts/docpage.html                           |  57 +-
 _layouts/post.html                              |   3 +-
 _sass/_doc-breadcrumbs.scss                     |  41 +
 _sass/_doc-code.scss                            |  77 ++
 _sass/_doc-content.scss                         | 423 +++++++++
 _sass/_doc-syntax.scss                          |  60 ++
 _sass/_download.scss                            |  33 +
 _sass/_home-code.scss                           |   5 +
 _sass/_home-video-box.scss                      |  55 ++
 _sass/_home-video-slider.scss                   |  34 +
 _sass/_site-arrows.scss                         |  83 ++
 _sass/_site-main.scss                           | 902 +++++++++++++++++++
 _sass/_site-responsive.scss                     | 264 ++++++
 _sass/_site-search.scss                         |   9 +
 blog.html                                       |   5 +-
 blog/_drafts/drill-1.0-released.md              |   2 +-
 ...-12-11-apache-drill-qa-panelist-spotlight.md |   5 +-
 css/arrows.css                                  |  86 --
 css/breadcrumbs.css                             |  40 -
 css/code.css                                    |  69 --
 css/content.scss                                |   6 +
 css/docpage.css                                 | 389 --------
 css/download.css                                |  33 -
 css/download.scss                               |   3 +
 css/home.scss                                   |   4 +
 css/responsive.css                              | 275 ------
 css/search.css                                  |   9 -
 css/site.scss                                   |   6 +
 css/style.css                                   | 897 ------------------
 css/syntax.css                                  |  60 --
 css/video-box.css                               |  55 --
 css/video-slider.css                            |  34 -
 images/home-json.png                            | Bin 54424 -> 41663 bytes
 index.html                                      |  22 +-
 js/drill.js                                     |   4 +-
 39 files changed, 2070 insertions(+), 2007 deletions(-)
----------------------------------------------------------------------



[10/31] drill git commit: add decimal note to data type conversion functions

Posted by ts...@apache.org.
add decimal note to data type conversion functions


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/f61eaf9e
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/f61eaf9e
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/f61eaf9e

Branch: refs/heads/gh-pages
Commit: f61eaf9eaa3bc910a4561d75681b573e2031e9aa
Parents: 6089ab0
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 19:43:59 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 19:43:59 2015 -0700

----------------------------------------------------------------------
 _docs/075-getting-query-information.md                        | 2 +-
 _docs/sql-reference/sql-functions/020-data-type-conversion.md | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/f61eaf9e/_docs/075-getting-query-information.md
----------------------------------------------------------------------
diff --git a/_docs/075-getting-query-information.md b/_docs/075-getting-query-information.md
index 57c1678..d5e2953 100644
--- a/_docs/075-getting-query-information.md
+++ b/_docs/075-getting-query-information.md
@@ -2,7 +2,7 @@
 title: "Getting Query Information"
 parent: "Query Audit Logging"
 ---
-The query log provides audit log functionality for the queries executed by various drillbits in the cluster. The log records important information about queries executed on the Drillbit where Drill runs. The log includes query text, start time, end time, user, status, schema, and the query id. You can query one of the following log files, depending on whether you run Drill in embedded or distributed mode, to get audit logging information.:
+The query log provides audit log functionality for the queries executed by various drillbits in the cluster. The log records important information about queries executed on the Drillbit where Drill runs. The log includes query text, start time, end time, user, status, schema, and the query id. You can query one of the following log files, depending on whether you run Drill in embedded or distributed mode, to get audit logging information:
 
 * `sqlline_queries.json` (embedded mode) 
 * `drillbit_queries.json` (distributed mode)

http://git-wip-us.apache.org/repos/asf/drill/blob/f61eaf9e/_docs/sql-reference/sql-functions/020-data-type-conversion.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/020-data-type-conversion.md b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
index 239da85..a6ec16e 100644
--- a/_docs/sql-reference/sql-functions/020-data-type-conversion.md
+++ b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
@@ -30,6 +30,8 @@ If the SELECT statement includes a WHERE clause that compares a column of an unk
 
     SELECT c_row, CAST(c_int AS DECIMAL(28,8)) FROM mydata WHERE CAST(c_int AS DECIMAL(28,8)) > -3.0;
 
+{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to `true`.{% include endnote.html %}
+
 Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types with one exception: When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the VARBINARY source/destination, use CAST.  
 
 Refer to the following tables for information about the data types to use for casting:
@@ -54,6 +56,8 @@ The following example shows how to cast a character to a DECIMAL having two deci
     | 1.00       |
     +------------+
 
+{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to `true`.{% include endnote.html %}
+
 ### Casting a Number to a Character String
 The first example shows Drill casting a number to a VARCHAR having a length of 3 bytes: The result is a 3-character string, 456. Drill supports the CHAR and CHARACTER VARYING alias.
 
@@ -85,6 +89,8 @@ Cast an integer to a decimal.
     +------------+
     1 row selected (0.08 seconds)
 
+{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to `true`.{% include endnote.html %}
+
 ### Casting Intervals
 
 To cast interval data to the INTERVALDAY or INTERVALYEAR types use the following syntax:


[25/31] drill git commit: add BB's clause pages, renamed sql command pages

Posted by ts...@apache.org.
add BB's clause pages, renamed sql command pages


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/9700ff63
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/9700ff63
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/9700ff63

Branch: refs/heads/gh-pages
Commit: 9700ff63a6be5eaa41227a6691ddfc8d62524fc9
Parents: 4f4d4fb
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 12:45:56 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 12:45:56 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 1658 +++++++++++++-----
 .../070-configuring-user-impersonation.md       |    2 +-
 .../050-json-data-model.md                      |    2 +-
 _docs/sql-reference/040-operators.md            |    2 +-
 .../sql-commands/005-supported-sql-commands.md  |    2 +-
 .../sql-commands/010-alter-session-command.md   |   74 -
 .../sql-commands/010-alter-session.md           |   74 +
 .../sql-commands/020-alter-system.md            |    2 +-
 .../sql-commands/030-create-table-as-command.md |  134 --
 .../sql-commands/030-create-table-as.md         |  134 ++
 .../sql-commands/050-create-view-command.md     |  197 ---
 .../sql-commands/050-create-view.md             |  197 +++
 .../sql-reference/sql-commands/055-drop-view.md |   47 +
 .../sql-commands/060-describe-command.md        |   99 --
 .../sql-reference/sql-commands/060-describe.md  |   99 ++
 .../sql-commands/070-explain-commands.md        |  156 --
 _docs/sql-reference/sql-commands/070-explain.md |  156 ++
 _docs/sql-reference/sql-commands/080-select.md  |   95 +-
 .../sql-commands/081-select-from.md             |   87 +
 .../sql-commands/082-select-group-by.md         |   51 +
 .../sql-commands/083-select-having.md           |   51 +
 .../sql-commands/084-select-limit.md            |   51 +
 .../sql-commands/085-select-offset.md           |   29 +
 .../sql-commands/086-select-order-by.md         |   71 +
 .../sql-commands/087-select-union.md            |   42 +
 .../sql-commands/088-select-where.md            |   59 +
 .../sql-commands/089-select-with.md             |   95 +
 .../090-show-databases-and-show-schemas.md      |    2 +-
 .../sql-commands/100-show-files.md              |    2 +-
 .../sql-commands/110-show-tables-command.md     |  136 --
 .../sql-commands/110-show-tables.md             |  136 ++
 .../sql-commands/120-use-command.md             |  170 --
 _docs/sql-reference/sql-commands/120-use.md     |  170 ++
 33 files changed, 2806 insertions(+), 1476 deletions(-)
----------------------------------------------------------------------



[16/31] drill git commit: DRILL-2098

Posted by ts...@apache.org.
DRILL-2098


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/12b47f17
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/12b47f17
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/12b47f17

Branch: refs/heads/gh-pages
Commit: 12b47f17363f765ae9b66da54dbede6017c9480f
Parents: 5f6a51a
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 07:26:31 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 07:26:31 2015 -0700

----------------------------------------------------------------------
 .../050-starting-drill-on-windows.md                         | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/12b47f17/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
index e8ef3e4..130faa1 100644
--- a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
@@ -5,10 +5,10 @@ parent: "Installing Drill in Embedded Mode"
 Start the Drill shell using the **sqlline command**. The `zk=local` means the local node is the ZooKeeper node. Complete the following steps to launch the Drill shell:
 
 1. Open the apache-drill-0.1.0 folder.  
-2. Open the bin folder, and double-click the `sqlline.bat` file:
-   ![drill bin dir]({{ site.baseurl }}/docs/img/drill-bin.png)
-   The Windows command prompt opens.  
-3. At the sqlline> prompt, type `!connect jdbc:drill:zk=local` and then press Enter:
+2. Go to the bin directory.
+2. Open Command Prompt and type the following command on the command line:
+   ``sqlline.bat -u "jdbc:drill:zk=local"``
+3. At the sqlline> prompt, type `"!connect jdbc:drill:zk=local"` and then press Enter:
    ![sqlline]({{ site.baseurl }}/docs/img/sqlline1.png)
 4. Enter the username, `admin`, and password, also `admin` when prompted.
    The `0: jdbc:drill:zk=local>` prompt appears.


[24/31] drill git commit: add BB's clause pages, renamed sql command pages

Posted by ts...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 6f8cde1..7fa1345 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -17,7 +17,7 @@
             "title": "2014 Q1 Drill Report", 
             "url": "/docs/2014-q1-drill-report/"
         }, 
-        "ALTER SESSION Command": {
+        "ALTER SESSION": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -29,16 +29,16 @@
                 }
             ], 
             "children": [], 
-            "next_title": "ALTER SYSTEM Command", 
-            "next_url": "/docs/alter-system-command/", 
+            "next_title": "ALTER SYSTEM", 
+            "next_url": "/docs/alter-system/", 
             "parent": "SQL Commands", 
             "previous_title": "Supported SQL Commands", 
             "previous_url": "/docs/supported-sql-commands/", 
-            "relative_path": "_docs/sql-reference/sql-commands/010-alter-session-command.md", 
-            "title": "ALTER SESSION Command", 
-            "url": "/docs/alter-session-command/"
+            "relative_path": "_docs/sql-reference/sql-commands/010-alter-session.md", 
+            "title": "ALTER SESSION", 
+            "url": "/docs/alter-session/"
         }, 
-        "ALTER SYSTEM Command": {
+        "ALTER SYSTEM": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -50,14 +50,14 @@
                 }
             ], 
             "children": [], 
-            "next_title": "CREATE TABLE AS (CTAS) Command", 
-            "next_url": "/docs/create-table-as-ctas-command/", 
+            "next_title": "CREATE TABLE AS (CTAS)", 
+            "next_url": "/docs/create-table-as-ctas/", 
             "parent": "SQL Commands", 
-            "previous_title": "ALTER SESSION Command", 
-            "previous_url": "/docs/alter-session-command/", 
+            "previous_title": "ALTER SESSION", 
+            "previous_url": "/docs/alter-session/", 
             "relative_path": "_docs/sql-reference/sql-commands/020-alter-system.md", 
-            "title": "ALTER SYSTEM Command", 
-            "url": "/docs/alter-system-command/"
+            "title": "ALTER SYSTEM", 
+            "url": "/docs/alter-system/"
         }, 
         "AOL Search": {
             "breadcrumbs": [
@@ -555,7 +555,7 @@
             "title": "CASE", 
             "url": "/docs/case/"
         }, 
-        "CREATE TABLE AS (CTAS) Command": {
+        "CREATE TABLE AS (CTAS)": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -567,16 +567,16 @@
                 }
             ], 
             "children": [], 
-            "next_title": "CREATE VIEW Command", 
-            "next_url": "/docs/create-view-command/", 
+            "next_title": "CREATE VIEW", 
+            "next_url": "/docs/create-view/", 
             "parent": "SQL Commands", 
-            "previous_title": "ALTER SYSTEM Command", 
-            "previous_url": "/docs/alter-system-command/", 
-            "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as-command.md", 
-            "title": "CREATE TABLE AS (CTAS) Command", 
-            "url": "/docs/create-table-as-ctas-command/"
+            "previous_title": "ALTER SYSTEM", 
+            "previous_url": "/docs/alter-system/", 
+            "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as.md", 
+            "title": "CREATE TABLE AS (CTAS)", 
+            "url": "/docs/create-table-as-ctas/"
         }, 
-        "CREATE VIEW Command": {
+        "CREATE VIEW": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -588,14 +588,14 @@
                 }
             ], 
             "children": [], 
-            "next_title": "DESCRIBE Command", 
-            "next_url": "/docs/describe-command/", 
+            "next_title": "DROP VIEW", 
+            "next_url": "/docs/drop-view/", 
             "parent": "SQL Commands", 
-            "previous_title": "CREATE TABLE AS (CTAS) Command", 
-            "previous_url": "/docs/create-table-as-ctas-command/", 
-            "relative_path": "_docs/sql-reference/sql-commands/050-create-view-command.md", 
-            "title": "CREATE VIEW Command", 
-            "url": "/docs/create-view-command/"
+            "previous_title": "CREATE TABLE AS (CTAS)", 
+            "previous_url": "/docs/create-table-as-ctas/", 
+            "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
+            "title": "CREATE VIEW", 
+            "url": "/docs/create-view/"
         }, 
         "Compiling Drill from Source": {
             "breadcrumbs": [
@@ -1684,7 +1684,7 @@
             "title": "Custom Function Interfaces", 
             "url": "/docs/custom-function-interfaces/"
         }, 
-        "DESCRIBE Command": {
+        "DESCRIBE": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -1696,14 +1696,35 @@
                 }
             ], 
             "children": [], 
-            "next_title": "EXPLAIN Commands", 
-            "next_url": "/docs/explain-commands/", 
+            "next_title": "EXPLAIN", 
+            "next_url": "/docs/explain/", 
             "parent": "SQL Commands", 
-            "previous_title": "CREATE VIEW Command", 
-            "previous_url": "/docs/create-view-command/", 
-            "relative_path": "_docs/sql-reference/sql-commands/060-describe-command.md", 
-            "title": "DESCRIBE Command", 
-            "url": "/docs/describe-command/"
+            "previous_title": "DROP VIEW", 
+            "previous_url": "/docs/drop-view/", 
+            "relative_path": "_docs/sql-reference/sql-commands/060-describe.md", 
+            "title": "DESCRIBE", 
+            "url": "/docs/describe/"
+        }, 
+        "DROP VIEW": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "DESCRIBE", 
+            "next_url": "/docs/describe/", 
+            "parent": "SQL Commands", 
+            "previous_title": "CREATE VIEW", 
+            "previous_url": "/docs/create-view/", 
+            "relative_path": "_docs/sql-reference/sql-commands/055-drop-view.md", 
+            "title": "DROP VIEW", 
+            "url": "/docs/drop-view/"
         }, 
         "Data Sources and File Formats": {
             "breadcrumbs": [], 
@@ -2725,7 +2746,7 @@
             "title": "Driver Configuration Options", 
             "url": "/docs/driver-configuration-options/"
         }, 
-        "EXPLAIN Commands": {
+        "EXPLAIN": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -2737,14 +2758,14 @@
                 }
             ], 
             "children": [], 
-            "next_title": "SELECT Statements", 
-            "next_url": "/docs/select-statements/", 
+            "next_title": "SELECT", 
+            "next_url": "/docs/select/", 
             "parent": "SQL Commands", 
-            "previous_title": "DESCRIBE Command", 
-            "previous_url": "/docs/describe-command/", 
-            "relative_path": "_docs/sql-reference/sql-commands/070-explain-commands.md", 
-            "title": "EXPLAIN Commands", 
-            "url": "/docs/explain-commands/"
+            "previous_title": "DESCRIBE", 
+            "previous_url": "/docs/describe/", 
+            "relative_path": "_docs/sql-reference/sql-commands/070-explain.md", 
+            "title": "EXPLAIN", 
+            "url": "/docs/explain/"
         }, 
         "Embedded Mode Prerequisites": {
             "breadcrumbs": [
@@ -6003,7 +6024,7 @@
             "title": "Reserved Keywords", 
             "url": "/docs/reserved-keywords/"
         }, 
-        "SELECT Statements": {
+        "SELECT": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -6015,16 +6036,205 @@
                 }
             ], 
             "children": [], 
-            "next_title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-            "next_url": "/docs/show-databases-and-show-schemas-command/", 
+            "next_title": "SELECT FROM", 
+            "next_url": "/docs/select-from/", 
             "parent": "SQL Commands", 
-            "previous_title": "EXPLAIN Commands", 
-            "previous_url": "/docs/explain-commands/", 
+            "previous_title": "EXPLAIN", 
+            "previous_url": "/docs/explain/", 
             "relative_path": "_docs/sql-reference/sql-commands/080-select.md", 
-            "title": "SELECT Statements", 
-            "url": "/docs/select-statements/"
+            "title": "SELECT", 
+            "url": "/docs/select/"
+        }, 
+        "SELECT FROM": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT GROUP BY", 
+            "next_url": "/docs/select-group-by/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT", 
+            "previous_url": "/docs/select/", 
+            "relative_path": "_docs/sql-reference/sql-commands/081-select-from.md", 
+            "title": "SELECT FROM", 
+            "url": "/docs/select-from/"
+        }, 
+        "SELECT GROUP BY": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT HAVING", 
+            "next_url": "/docs/select-having/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT FROM", 
+            "previous_url": "/docs/select-from/", 
+            "relative_path": "_docs/sql-reference/sql-commands/082-select-group-by.md", 
+            "title": "SELECT GROUP BY", 
+            "url": "/docs/select-group-by/"
+        }, 
+        "SELECT HAVING": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT LIMIT", 
+            "next_url": "/docs/select-limit/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT GROUP BY", 
+            "previous_url": "/docs/select-group-by/", 
+            "relative_path": "_docs/sql-reference/sql-commands/083-select-having.md", 
+            "title": "SELECT HAVING", 
+            "url": "/docs/select-having/"
+        }, 
+        "SELECT LIMIT": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT OFFSET", 
+            "next_url": "/docs/select-offset/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT HAVING", 
+            "previous_url": "/docs/select-having/", 
+            "relative_path": "_docs/sql-reference/sql-commands/084-select-limit.md", 
+            "title": "SELECT LIMIT", 
+            "url": "/docs/select-limit/"
+        }, 
+        "SELECT OFFSET": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT ORDER BY", 
+            "next_url": "/docs/select-order-by/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT LIMIT", 
+            "previous_url": "/docs/select-limit/", 
+            "relative_path": "_docs/sql-reference/sql-commands/085-select-offset.md", 
+            "title": "SELECT OFFSET", 
+            "url": "/docs/select-offset/"
+        }, 
+        "SELECT ORDER BY": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT UNION", 
+            "next_url": "/docs/select-union/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT OFFSET", 
+            "previous_url": "/docs/select-offset/", 
+            "relative_path": "_docs/sql-reference/sql-commands/086-select-order-by.md", 
+            "title": "SELECT ORDER BY", 
+            "url": "/docs/select-order-by/"
+        }, 
+        "SELECT UNION": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT WHERE", 
+            "next_url": "/docs/select-where/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT ORDER BY", 
+            "previous_url": "/docs/select-order-by/", 
+            "relative_path": "_docs/sql-reference/sql-commands/087-select-union.md", 
+            "title": "SELECT UNION", 
+            "url": "/docs/select-union/"
+        }, 
+        "SELECT WHERE": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SELECT WITH", 
+            "next_url": "/docs/select-with/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT UNION", 
+            "previous_url": "/docs/select-union/", 
+            "relative_path": "_docs/sql-reference/sql-commands/088-select-where.md", 
+            "title": "SELECT WHERE", 
+            "url": "/docs/select-where/"
+        }, 
+        "SELECT WITH": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Commands", 
+                    "url": "/docs/sql-commands/"
+                }, 
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "SHOW DATABASES and SHOW SCHEMAS", 
+            "next_url": "/docs/show-databases-and-show-schemas/", 
+            "parent": "SQL Commands", 
+            "previous_title": "SELECT WHERE", 
+            "previous_url": "/docs/select-where/", 
+            "relative_path": "_docs/sql-reference/sql-commands/089-select-with.md", 
+            "title": "SELECT WITH", 
+            "url": "/docs/select-with/"
         }, 
-        "SHOW DATABASES and SHOW SCHEMAS Command": {
+        "SHOW DATABASES and SHOW SCHEMAS": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -6036,16 +6246,16 @@
                 }
             ], 
             "children": [], 
-            "next_title": "SHOW FILES Command", 
-            "next_url": "/docs/show-files-command/", 
+            "next_title": "SHOW FILES", 
+            "next_url": "/docs/show-files/", 
             "parent": "SQL Commands", 
-            "previous_title": "SELECT Statements", 
-            "previous_url": "/docs/select-statements/", 
+            "previous_title": "SELECT WITH", 
+            "previous_url": "/docs/select-with/", 
             "relative_path": "_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md", 
-            "title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-            "url": "/docs/show-databases-and-show-schemas-command/"
+            "title": "SHOW DATABASES and SHOW SCHEMAS", 
+            "url": "/docs/show-databases-and-show-schemas/"
         }, 
-        "SHOW FILES Command": {
+        "SHOW FILES": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -6057,16 +6267,16 @@
                 }
             ], 
             "children": [], 
-            "next_title": "SHOW TABLES Command", 
-            "next_url": "/docs/show-tables-command/", 
+            "next_title": "SHOW TABLES", 
+            "next_url": "/docs/show-tables/", 
             "parent": "SQL Commands", 
-            "previous_title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-            "previous_url": "/docs/show-databases-and-show-schemas-command/", 
+            "previous_title": "SHOW DATABASES and SHOW SCHEMAS", 
+            "previous_url": "/docs/show-databases-and-show-schemas/", 
             "relative_path": "_docs/sql-reference/sql-commands/100-show-files.md", 
-            "title": "SHOW FILES Command", 
-            "url": "/docs/show-files-command/"
+            "title": "SHOW FILES", 
+            "url": "/docs/show-files/"
         }, 
-        "SHOW TABLES Command": {
+        "SHOW TABLES": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -6078,14 +6288,14 @@
                 }
             ], 
             "children": [], 
-            "next_title": "USE Command", 
-            "next_url": "/docs/use-command/", 
+            "next_title": "USE", 
+            "next_url": "/docs/use/", 
             "parent": "SQL Commands", 
-            "previous_title": "SHOW FILES Command", 
-            "previous_url": "/docs/show-files-command/", 
-            "relative_path": "_docs/sql-reference/sql-commands/110-show-tables-command.md", 
-            "title": "SHOW TABLES Command", 
-            "url": "/docs/show-tables-command/"
+            "previous_title": "SHOW FILES", 
+            "previous_url": "/docs/show-files/", 
+            "relative_path": "_docs/sql-reference/sql-commands/110-show-tables.md", 
+            "title": "SHOW TABLES", 
+            "url": "/docs/show-tables/"
         }, 
         "SQL Commands": {
             "breadcrumbs": [
@@ -6107,8 +6317,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "ALTER SESSION Command", 
-                    "next_url": "/docs/alter-session-command/", 
+                    "next_title": "ALTER SESSION", 
+                    "next_url": "/docs/alter-session/", 
                     "parent": "SQL Commands", 
                     "previous_title": "SQL Commands", 
                     "previous_url": "/docs/sql-commands/", 
@@ -6128,14 +6338,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "ALTER SYSTEM Command", 
-                    "next_url": "/docs/alter-system-command/", 
+                    "next_title": "ALTER SYSTEM", 
+                    "next_url": "/docs/alter-system/", 
                     "parent": "SQL Commands", 
                     "previous_title": "Supported SQL Commands", 
                     "previous_url": "/docs/supported-sql-commands/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/010-alter-session-command.md", 
-                    "title": "ALTER SESSION Command", 
-                    "url": "/docs/alter-session-command/"
+                    "relative_path": "_docs/sql-reference/sql-commands/010-alter-session.md", 
+                    "title": "ALTER SESSION", 
+                    "url": "/docs/alter-session/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6149,14 +6359,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "CREATE TABLE AS (CTAS) Command", 
-                    "next_url": "/docs/create-table-as-ctas-command/", 
+                    "next_title": "CREATE TABLE AS (CTAS)", 
+                    "next_url": "/docs/create-table-as-ctas/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "ALTER SESSION Command", 
-                    "previous_url": "/docs/alter-session-command/", 
+                    "previous_title": "ALTER SESSION", 
+                    "previous_url": "/docs/alter-session/", 
                     "relative_path": "_docs/sql-reference/sql-commands/020-alter-system.md", 
-                    "title": "ALTER SYSTEM Command", 
-                    "url": "/docs/alter-system-command/"
+                    "title": "ALTER SYSTEM", 
+                    "url": "/docs/alter-system/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6170,14 +6380,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "CREATE VIEW Command", 
-                    "next_url": "/docs/create-view-command/", 
+                    "next_title": "CREATE VIEW", 
+                    "next_url": "/docs/create-view/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "ALTER SYSTEM Command", 
-                    "previous_url": "/docs/alter-system-command/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as-command.md", 
-                    "title": "CREATE TABLE AS (CTAS) Command", 
-                    "url": "/docs/create-table-as-ctas-command/"
+                    "previous_title": "ALTER SYSTEM", 
+                    "previous_url": "/docs/alter-system/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as.md", 
+                    "title": "CREATE TABLE AS (CTAS)", 
+                    "url": "/docs/create-table-as-ctas/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6191,14 +6401,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "DESCRIBE Command", 
-                    "next_url": "/docs/describe-command/", 
+                    "next_title": "DROP VIEW", 
+                    "next_url": "/docs/drop-view/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "CREATE TABLE AS (CTAS) Command", 
-                    "previous_url": "/docs/create-table-as-ctas-command/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/050-create-view-command.md", 
-                    "title": "CREATE VIEW Command", 
-                    "url": "/docs/create-view-command/"
+                    "previous_title": "CREATE TABLE AS (CTAS)", 
+                    "previous_url": "/docs/create-table-as-ctas/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
+                    "title": "CREATE VIEW", 
+                    "url": "/docs/create-view/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6212,14 +6422,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "EXPLAIN Commands", 
-                    "next_url": "/docs/explain-commands/", 
+                    "next_title": "DESCRIBE", 
+                    "next_url": "/docs/describe/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "CREATE VIEW Command", 
-                    "previous_url": "/docs/create-view-command/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/060-describe-command.md", 
-                    "title": "DESCRIBE Command", 
-                    "url": "/docs/describe-command/"
+                    "previous_title": "CREATE VIEW", 
+                    "previous_url": "/docs/create-view/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/055-drop-view.md", 
+                    "title": "DROP VIEW", 
+                    "url": "/docs/drop-view/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6233,14 +6443,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "SELECT Statements", 
-                    "next_url": "/docs/select-statements/", 
+                    "next_title": "EXPLAIN", 
+                    "next_url": "/docs/explain/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "DESCRIBE Command", 
-                    "previous_url": "/docs/describe-command/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/070-explain-commands.md", 
-                    "title": "EXPLAIN Commands", 
-                    "url": "/docs/explain-commands/"
+                    "previous_title": "DROP VIEW", 
+                    "previous_url": "/docs/drop-view/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/060-describe.md", 
+                    "title": "DESCRIBE", 
+                    "url": "/docs/describe/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6254,14 +6464,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-                    "next_url": "/docs/show-databases-and-show-schemas-command/", 
+                    "next_title": "SELECT", 
+                    "next_url": "/docs/select/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "EXPLAIN Commands", 
-                    "previous_url": "/docs/explain-commands/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/080-select.md", 
-                    "title": "SELECT Statements", 
-                    "url": "/docs/select-statements/"
+                    "previous_title": "DESCRIBE", 
+                    "previous_url": "/docs/describe/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/070-explain.md", 
+                    "title": "EXPLAIN", 
+                    "url": "/docs/explain/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6275,14 +6485,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "SHOW FILES Command", 
-                    "next_url": "/docs/show-files-command/", 
+                    "next_title": "SELECT FROM", 
+                    "next_url": "/docs/select-from/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "SELECT Statements", 
-                    "previous_url": "/docs/select-statements/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md", 
-                    "title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-                    "url": "/docs/show-databases-and-show-schemas-command/"
+                    "previous_title": "EXPLAIN", 
+                    "previous_url": "/docs/explain/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/080-select.md", 
+                    "title": "SELECT", 
+                    "url": "/docs/select/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6296,14 +6506,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "SHOW TABLES Command", 
-                    "next_url": "/docs/show-tables-command/", 
+                    "next_title": "SELECT GROUP BY", 
+                    "next_url": "/docs/select-group-by/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-                    "previous_url": "/docs/show-databases-and-show-schemas-command/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/100-show-files.md", 
-                    "title": "SHOW FILES Command", 
-                    "url": "/docs/show-files-command/"
+                    "previous_title": "SELECT", 
+                    "previous_url": "/docs/select/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/081-select-from.md", 
+                    "title": "SELECT FROM", 
+                    "url": "/docs/select-from/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6317,14 +6527,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "USE Command", 
-                    "next_url": "/docs/use-command/", 
+                    "next_title": "SELECT HAVING", 
+                    "next_url": "/docs/select-having/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "SHOW FILES Command", 
-                    "previous_url": "/docs/show-files-command/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/110-show-tables-command.md", 
-                    "title": "SHOW TABLES Command", 
-                    "url": "/docs/show-tables-command/"
+                    "previous_title": "SELECT FROM", 
+                    "previous_url": "/docs/select-from/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/082-select-group-by.md", 
+                    "title": "SELECT GROUP BY", 
+                    "url": "/docs/select-group-by/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -6338,38 +6548,20 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "SQL Conditional Expressions", 
-                    "next_url": "/docs/sql-conditional-expressions/", 
+                    "next_title": "SELECT LIMIT", 
+                    "next_url": "/docs/select-limit/", 
                     "parent": "SQL Commands", 
-                    "previous_title": "SHOW TABLES Command", 
-                    "previous_url": "/docs/show-tables-command/", 
-                    "relative_path": "_docs/sql-reference/sql-commands/120-use-command.md", 
-                    "title": "USE Command", 
-                    "url": "/docs/use-command/"
-                }
-            ], 
-            "next_title": "Supported SQL Commands", 
-            "next_url": "/docs/supported-sql-commands/", 
-            "parent": "SQL Reference", 
-            "previous_title": "Query Directory Functions", 
-            "previous_url": "/docs/query-directory-functions/", 
-            "relative_path": "_docs/sql-reference/070-sql-commands-summary.md", 
-            "title": "SQL Commands", 
-            "url": "/docs/sql-commands/"
-        }, 
-        "SQL Conditional Expressions": {
-            "breadcrumbs": [
-                {
-                    "title": "SQL Reference", 
-                    "url": "/docs/sql-reference/"
-                }
-            ], 
-            "children": [
+                    "previous_title": "SELECT GROUP BY", 
+                    "previous_url": "/docs/select-group-by/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/083-select-having.md", 
+                    "title": "SELECT HAVING", 
+                    "url": "/docs/select-having/"
+                }, 
                 {
                     "breadcrumbs": [
                         {
-                            "title": "SQL Conditional Expressions", 
-                            "url": "/docs/sql-conditional-expressions/"
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
                         }, 
                         {
                             "title": "SQL Reference", 
@@ -6377,55 +6569,20 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Reserved Keywords", 
-                    "next_url": "/docs/reserved-keywords/", 
-                    "parent": "SQL Conditional Expressions", 
-                    "previous_title": "SQL Conditional Expressions", 
-                    "previous_url": "/docs/sql-conditional-expressions/", 
-                    "relative_path": "_docs/sql-reference/sql-conditional-expressions/010-case-expression.md", 
-                    "title": "CASE", 
-                    "url": "/docs/case/"
-                }
-            ], 
-            "next_title": "CASE", 
-            "next_url": "/docs/case/", 
-            "parent": "SQL Reference", 
-            "previous_title": "USE Command", 
-            "previous_url": "/docs/use-command/", 
-            "relative_path": "_docs/sql-reference/075-sql-conditional-expressions.md", 
-            "title": "SQL Conditional Expressions", 
-            "url": "/docs/sql-conditional-expressions/"
-        }, 
-        "SQL Extensions": {
-            "breadcrumbs": [
-                {
-                    "title": "SQL Reference", 
-                    "url": "/docs/sql-reference/"
-                }
-            ], 
-            "children": [], 
-            "next_title": "Data Sources and File Formats", 
-            "next_url": "/docs/data-sources-and-file-formats/", 
-            "parent": "SQL Reference", 
-            "previous_title": "Reserved Keywords", 
-            "previous_url": "/docs/reserved-keywords/", 
-            "relative_path": "_docs/sql-reference/090-sql-extensions.md", 
-            "title": "SQL Extensions", 
-            "url": "/docs/sql-extensions/"
-        }, 
-        "SQL Functions": {
-            "breadcrumbs": [
-                {
-                    "title": "SQL Reference", 
-                    "url": "/docs/sql-reference/"
-                }
-            ], 
-            "children": [
+                    "next_title": "SELECT OFFSET", 
+                    "next_url": "/docs/select-offset/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SELECT HAVING", 
+                    "previous_url": "/docs/select-having/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/084-select-limit.md", 
+                    "title": "SELECT LIMIT", 
+                    "url": "/docs/select-limit/"
+                }, 
                 {
                     "breadcrumbs": [
                         {
-                            "title": "SQL Functions", 
-                            "url": "/docs/sql-functions/"
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
                         }, 
                         {
                             "title": "SQL Reference", 
@@ -6433,20 +6590,20 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Math and Trig", 
-                    "next_url": "/docs/math-and-trig/", 
-                    "parent": "SQL Functions", 
-                    "previous_title": "SQL Functions", 
-                    "previous_url": "/docs/sql-functions/", 
-                    "relative_path": "_docs/sql-reference/sql-functions/005-about-sql-function-examples.md", 
-                    "title": "About SQL Function Examples", 
-                    "url": "/docs/about-sql-function-examples/"
+                    "next_title": "SELECT ORDER BY", 
+                    "next_url": "/docs/select-order-by/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SELECT LIMIT", 
+                    "previous_url": "/docs/select-limit/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/085-select-offset.md", 
+                    "title": "SELECT OFFSET", 
+                    "url": "/docs/select-offset/"
                 }, 
                 {
                     "breadcrumbs": [
                         {
-                            "title": "SQL Functions", 
-                            "url": "/docs/sql-functions/"
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
                         }, 
                         {
                             "title": "SQL Reference", 
@@ -6454,20 +6611,20 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Data Type Conversion", 
-                    "next_url": "/docs/data-type-conversion/", 
-                    "parent": "SQL Functions", 
-                    "previous_title": "About SQL Function Examples", 
-                    "previous_url": "/docs/about-sql-function-examples/", 
-                    "relative_path": "_docs/sql-reference/sql-functions/010-math-and-trig.md", 
-                    "title": "Math and Trig", 
-                    "url": "/docs/math-and-trig/"
+                    "next_title": "SELECT UNION", 
+                    "next_url": "/docs/select-union/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SELECT OFFSET", 
+                    "previous_url": "/docs/select-offset/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/086-select-order-by.md", 
+                    "title": "SELECT ORDER BY", 
+                    "url": "/docs/select-order-by/"
                 }, 
                 {
                     "breadcrumbs": [
                         {
-                            "title": "SQL Functions", 
-                            "url": "/docs/sql-functions/"
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
                         }, 
                         {
                             "title": "SQL Reference", 
@@ -6475,20 +6632,20 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Date/Time Functions and Arithmetic", 
-                    "next_url": "/docs/date-time-functions-and-arithmetic/", 
-                    "parent": "SQL Functions", 
-                    "previous_title": "Math and Trig", 
-                    "previous_url": "/docs/math-and-trig/", 
-                    "relative_path": "_docs/sql-reference/sql-functions/020-data-type-conversion.md", 
-                    "title": "Data Type Conversion", 
-                    "url": "/docs/data-type-conversion/"
+                    "next_title": "SELECT WHERE", 
+                    "next_url": "/docs/select-where/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SELECT ORDER BY", 
+                    "previous_url": "/docs/select-order-by/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/087-select-union.md", 
+                    "title": "SELECT UNION", 
+                    "url": "/docs/select-union/"
                 }, 
                 {
                     "breadcrumbs": [
                         {
-                            "title": "SQL Functions", 
-                            "url": "/docs/sql-functions/"
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
                         }, 
                         {
                             "title": "SQL Reference", 
@@ -6496,14 +6653,277 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "String Manipulation", 
-                    "next_url": "/docs/string-manipulation/", 
-                    "parent": "SQL Functions", 
-                    "previous_title": "Data Type Conversion", 
-                    "previous_url": "/docs/data-type-conversion/", 
-                    "relative_path": "_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md", 
-                    "title": "Date/Time Functions and Arithmetic", 
-                    "url": "/docs/date-time-functions-and-arithmetic/"
+                    "next_title": "SELECT WITH", 
+                    "next_url": "/docs/select-with/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SELECT UNION", 
+                    "previous_url": "/docs/select-union/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/088-select-where.md", 
+                    "title": "SELECT WHERE", 
+                    "url": "/docs/select-where/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "SHOW DATABASES and SHOW SCHEMAS", 
+                    "next_url": "/docs/show-databases-and-show-schemas/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SELECT WHERE", 
+                    "previous_url": "/docs/select-where/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/089-select-with.md", 
+                    "title": "SELECT WITH", 
+                    "url": "/docs/select-with/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "SHOW FILES", 
+                    "next_url": "/docs/show-files/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SELECT WITH", 
+                    "previous_url": "/docs/select-with/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md", 
+                    "title": "SHOW DATABASES and SHOW SCHEMAS", 
+                    "url": "/docs/show-databases-and-show-schemas/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "SHOW TABLES", 
+                    "next_url": "/docs/show-tables/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SHOW DATABASES and SHOW SCHEMAS", 
+                    "previous_url": "/docs/show-databases-and-show-schemas/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/100-show-files.md", 
+                    "title": "SHOW FILES", 
+                    "url": "/docs/show-files/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "USE", 
+                    "next_url": "/docs/use/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SHOW FILES", 
+                    "previous_url": "/docs/show-files/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/110-show-tables.md", 
+                    "title": "SHOW TABLES", 
+                    "url": "/docs/show-tables/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Commands", 
+                            "url": "/docs/sql-commands/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "SQL Conditional Expressions", 
+                    "next_url": "/docs/sql-conditional-expressions/", 
+                    "parent": "SQL Commands", 
+                    "previous_title": "SHOW TABLES", 
+                    "previous_url": "/docs/show-tables/", 
+                    "relative_path": "_docs/sql-reference/sql-commands/120-use.md", 
+                    "title": "USE", 
+                    "url": "/docs/use/"
+                }
+            ], 
+            "next_title": "Supported SQL Commands", 
+            "next_url": "/docs/supported-sql-commands/", 
+            "parent": "SQL Reference", 
+            "previous_title": "Query Directory Functions", 
+            "previous_url": "/docs/query-directory-functions/", 
+            "relative_path": "_docs/sql-reference/070-sql-commands-summary.md", 
+            "title": "SQL Commands", 
+            "url": "/docs/sql-commands/"
+        }, 
+        "SQL Conditional Expressions": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Conditional Expressions", 
+                            "url": "/docs/sql-conditional-expressions/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Reserved Keywords", 
+                    "next_url": "/docs/reserved-keywords/", 
+                    "parent": "SQL Conditional Expressions", 
+                    "previous_title": "SQL Conditional Expressions", 
+                    "previous_url": "/docs/sql-conditional-expressions/", 
+                    "relative_path": "_docs/sql-reference/sql-conditional-expressions/010-case-expression.md", 
+                    "title": "CASE", 
+                    "url": "/docs/case/"
+                }
+            ], 
+            "next_title": "CASE", 
+            "next_url": "/docs/case/", 
+            "parent": "SQL Reference", 
+            "previous_title": "USE", 
+            "previous_url": "/docs/use/", 
+            "relative_path": "_docs/sql-reference/075-sql-conditional-expressions.md", 
+            "title": "SQL Conditional Expressions", 
+            "url": "/docs/sql-conditional-expressions/"
+        }, 
+        "SQL Extensions": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Data Sources and File Formats", 
+            "next_url": "/docs/data-sources-and-file-formats/", 
+            "parent": "SQL Reference", 
+            "previous_title": "Reserved Keywords", 
+            "previous_url": "/docs/reserved-keywords/", 
+            "relative_path": "_docs/sql-reference/090-sql-extensions.md", 
+            "title": "SQL Extensions", 
+            "url": "/docs/sql-extensions/"
+        }, 
+        "SQL Functions": {
+            "breadcrumbs": [
+                {
+                    "title": "SQL Reference", 
+                    "url": "/docs/sql-reference/"
+                }
+            ], 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Functions", 
+                            "url": "/docs/sql-functions/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Math and Trig", 
+                    "next_url": "/docs/math-and-trig/", 
+                    "parent": "SQL Functions", 
+                    "previous_title": "SQL Functions", 
+                    "previous_url": "/docs/sql-functions/", 
+                    "relative_path": "_docs/sql-reference/sql-functions/005-about-sql-function-examples.md", 
+                    "title": "About SQL Function Examples", 
+                    "url": "/docs/about-sql-function-examples/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Functions", 
+                            "url": "/docs/sql-functions/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Data Type Conversion", 
+                    "next_url": "/docs/data-type-conversion/", 
+                    "parent": "SQL Functions", 
+                    "previous_title": "About SQL Function Examples", 
+                    "previous_url": "/docs/about-sql-function-examples/", 
+                    "relative_path": "_docs/sql-reference/sql-functions/010-math-and-trig.md", 
+                    "title": "Math and Trig", 
+                    "url": "/docs/math-and-trig/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Functions", 
+                            "url": "/docs/sql-functions/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Date/Time Functions and Arithmetic", 
+                    "next_url": "/docs/date-time-functions-and-arithmetic/", 
+                    "parent": "SQL Functions", 
+                    "previous_title": "Math and Trig", 
+                    "previous_url": "/docs/math-and-trig/", 
+                    "relative_path": "_docs/sql-reference/sql-functions/020-data-type-conversion.md", 
+                    "title": "Data Type Conversion", 
+                    "url": "/docs/data-type-conversion/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "SQL Functions", 
+                            "url": "/docs/sql-functions/"
+                        }, 
+                        {
+                            "title": "SQL Reference", 
+                            "url": "/docs/sql-reference/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "String Manipulation", 
+                    "next_url": "/docs/string-manipulation/", 
+                    "parent": "SQL Functions", 
+                    "previous_title": "Data Type Conversion", 
+                    "previous_url": "/docs/data-type-conversion/", 
+                    "relative_path": "_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md", 
+                    "title": "Date/Time Functions and Arithmetic", 
+                    "url": "/docs/date-time-functions-and-arithmetic/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -7038,8 +7458,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "ALTER SESSION Command", 
-                            "next_url": "/docs/alter-session-command/", 
+                            "next_title": "ALTER SESSION", 
+                            "next_url": "/docs/alter-session/", 
                             "parent": "SQL Commands", 
                             "previous_title": "SQL Commands", 
                             "previous_url": "/docs/sql-commands/", 
@@ -7059,14 +7479,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "ALTER SYSTEM Command", 
-                            "next_url": "/docs/alter-system-command/", 
+                            "next_title": "ALTER SYSTEM", 
+                            "next_url": "/docs/alter-system/", 
                             "parent": "SQL Commands", 
                             "previous_title": "Supported SQL Commands", 
                             "previous_url": "/docs/supported-sql-commands/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/010-alter-session-command.md", 
-                            "title": "ALTER SESSION Command", 
-                            "url": "/docs/alter-session-command/"
+                            "relative_path": "_docs/sql-reference/sql-commands/010-alter-session.md", 
+                            "title": "ALTER SESSION", 
+                            "url": "/docs/alter-session/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7080,14 +7500,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "CREATE TABLE AS (CTAS) Command", 
-                            "next_url": "/docs/create-table-as-ctas-command/", 
+                            "next_title": "CREATE TABLE AS (CTAS)", 
+                            "next_url": "/docs/create-table-as-ctas/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "ALTER SESSION Command", 
-                            "previous_url": "/docs/alter-session-command/", 
+                            "previous_title": "ALTER SESSION", 
+                            "previous_url": "/docs/alter-session/", 
                             "relative_path": "_docs/sql-reference/sql-commands/020-alter-system.md", 
-                            "title": "ALTER SYSTEM Command", 
-                            "url": "/docs/alter-system-command/"
+                            "title": "ALTER SYSTEM", 
+                            "url": "/docs/alter-system/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7101,14 +7521,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "CREATE VIEW Command", 
-                            "next_url": "/docs/create-view-command/", 
+                            "next_title": "CREATE VIEW", 
+                            "next_url": "/docs/create-view/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "ALTER SYSTEM Command", 
-                            "previous_url": "/docs/alter-system-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as-command.md", 
-                            "title": "CREATE TABLE AS (CTAS) Command", 
-                            "url": "/docs/create-table-as-ctas-command/"
+                            "previous_title": "ALTER SYSTEM", 
+                            "previous_url": "/docs/alter-system/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as.md", 
+                            "title": "CREATE TABLE AS (CTAS)", 
+                            "url": "/docs/create-table-as-ctas/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7122,14 +7542,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "DESCRIBE Command", 
-                            "next_url": "/docs/describe-command/", 
+                            "next_title": "DROP VIEW", 
+                            "next_url": "/docs/drop-view/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "CREATE TABLE AS (CTAS) Command", 
-                            "previous_url": "/docs/create-table-as-ctas-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/050-create-view-command.md", 
-                            "title": "CREATE VIEW Command", 
-                            "url": "/docs/create-view-command/"
+                            "previous_title": "CREATE TABLE AS (CTAS)", 
+                            "previous_url": "/docs/create-table-as-ctas/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
+                            "title": "CREATE VIEW", 
+                            "url": "/docs/create-view/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7143,14 +7563,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "EXPLAIN Commands", 
-                            "next_url": "/docs/explain-commands/", 
+                            "next_title": "DESCRIBE", 
+                            "next_url": "/docs/describe/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "CREATE VIEW Command", 
-                            "previous_url": "/docs/create-view-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/060-describe-command.md", 
-                            "title": "DESCRIBE Command", 
-                            "url": "/docs/describe-command/"
+                            "previous_title": "CREATE VIEW", 
+                            "previous_url": "/docs/create-view/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/055-drop-view.md", 
+                            "title": "DROP VIEW", 
+                            "url": "/docs/drop-view/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7164,14 +7584,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "SELECT Statements", 
-                            "next_url": "/docs/select-statements/", 
+                            "next_title": "EXPLAIN", 
+                            "next_url": "/docs/explain/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "DESCRIBE Command", 
-                            "previous_url": "/docs/describe-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/070-explain-commands.md", 
-                            "title": "EXPLAIN Commands", 
-                            "url": "/docs/explain-commands/"
+                            "previous_title": "DROP VIEW", 
+                            "previous_url": "/docs/drop-view/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/060-describe.md", 
+                            "title": "DESCRIBE", 
+                            "url": "/docs/describe/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7185,14 +7605,224 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-                            "next_url": "/docs/show-databases-and-show-schemas-command/", 
+                            "next_title": "SELECT", 
+                            "next_url": "/docs/select/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "EXPLAIN Commands", 
-                            "previous_url": "/docs/explain-commands/", 
+                            "previous_title": "DESCRIBE", 
+                            "previous_url": "/docs/describe/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/070-explain.md", 
+                            "title": "EXPLAIN", 
+                            "url": "/docs/explain/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT FROM", 
+                            "next_url": "/docs/select-from/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "EXPLAIN", 
+                            "previous_url": "/docs/explain/", 
                             "relative_path": "_docs/sql-reference/sql-commands/080-select.md", 
-                            "title": "SELECT Statements", 
-                            "url": "/docs/select-statements/"
+                            "title": "SELECT", 
+                            "url": "/docs/select/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT GROUP BY", 
+                            "next_url": "/docs/select-group-by/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT", 
+                            "previous_url": "/docs/select/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/081-select-from.md", 
+                            "title": "SELECT FROM", 
+                            "url": "/docs/select-from/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT HAVING", 
+                            "next_url": "/docs/select-having/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT FROM", 
+                            "previous_url": "/docs/select-from/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/082-select-group-by.md", 
+                            "title": "SELECT GROUP BY", 
+                            "url": "/docs/select-group-by/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT LIMIT", 
+                            "next_url": "/docs/select-limit/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT GROUP BY", 
+                            "previous_url": "/docs/select-group-by/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/083-select-having.md", 
+                            "title": "SELECT HAVING", 
+                            "url": "/docs/select-having/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT OFFSET", 
+                            "next_url": "/docs/select-offset/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT HAVING", 
+                            "previous_url": "/docs/select-having/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/084-select-limit.md", 
+                            "title": "SELECT LIMIT", 
+                            "url": "/docs/select-limit/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT ORDER BY", 
+                            "next_url": "/docs/select-order-by/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT LIMIT", 
+                            "previous_url": "/docs/select-limit/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/085-select-offset.md", 
+                            "title": "SELECT OFFSET", 
+                            "url": "/docs/select-offset/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT UNION", 
+                            "next_url": "/docs/select-union/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT OFFSET", 
+                            "previous_url": "/docs/select-offset/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/086-select-order-by.md", 
+                            "title": "SELECT ORDER BY", 
+                            "url": "/docs/select-order-by/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT WHERE", 
+                            "next_url": "/docs/select-where/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT ORDER BY", 
+                            "previous_url": "/docs/select-order-by/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/087-select-union.md", 
+                            "title": "SELECT UNION", 
+                            "url": "/docs/select-union/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SELECT WITH", 
+                            "next_url": "/docs/select-with/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT UNION", 
+                            "previous_url": "/docs/select-union/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/088-select-where.md", 
+                            "title": "SELECT WHERE", 
+                            "url": "/docs/select-where/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "SQL Commands", 
+                                    "url": "/docs/sql-commands/"
+                                }, 
+                                {
+                                    "title": "SQL Reference", 
+                                    "url": "/docs/sql-reference/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "SHOW DATABASES and SHOW SCHEMAS", 
+                            "next_url": "/docs/show-databases-and-show-schemas/", 
+                            "parent": "SQL Commands", 
+                            "previous_title": "SELECT WHERE", 
+                            "previous_url": "/docs/select-where/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/089-select-with.md", 
+                            "title": "SELECT WITH", 
+                            "url": "/docs/select-with/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7206,14 +7836,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "SHOW FILES Command", 
-                            "next_url": "/docs/show-files-command/", 
+                            "next_title": "SHOW FILES", 
+                            "next_url": "/docs/show-files/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "SELECT Statements", 
-                            "previous_url": "/docs/select-statements/", 
+                            "previous_title": "SELECT WITH", 
+                            "previous_url": "/docs/select-with/", 
                             "relative_path": "_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md", 
-                            "title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-                            "url": "/docs/show-databases-and-show-schemas-command/"
+                            "title": "SHOW DATABASES and SHOW SCHEMAS", 
+                            "url": "/docs/show-databases-and-show-schemas/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7227,14 +7857,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "SHOW TABLES Command", 
-                            "next_url": "/docs/show-tables-command/", 
+                            "next_title": "SHOW TABLES", 
+                            "next_url": "/docs/show-tables/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "SHOW DATABASES and SHOW SCHEMAS Command", 
-                            "previous_url": "/docs/show-databases-and-show-schemas-command/", 
+                            "previous_title": "SHOW DATABASES and SHOW SCHEMAS", 
+                            "previous_url": "/docs/show-databases-and-show-schemas/", 
                             "relative_path": "_docs/sql-reference/sql-commands/100-show-files.md", 
-                            "title": "SHOW FILES Command", 
-                            "url": "/docs/show-files-command/"
+                            "title": "SHOW FILES", 
+                            "url": "/docs/show-files/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7248,14 +7878,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "USE Command", 
-                            "next_url": "/docs/use-command/", 
+                            "next_title": "USE", 
+                            "next_url": "/docs/use/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "SHOW FILES Command", 
-                            "previous_url": "/docs/show-files-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/110-show-tables-command.md", 
-                            "title": "SHOW TABLES Command", 
-                            "url": "/docs/show-tables-command/"
+                            "previous_title": "SHOW FILES", 
+                            "previous_url": "/docs/show-files/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/110-show-tables.md", 
+                            "title": "SHOW TABLES", 
+                            "url": "/docs/show-tables/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -7272,11 +7902,11 @@
                             "next_title": "SQL Conditional Expressions", 
                             "next_url": "/docs/sql-conditional-expressions/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "SHOW TABLES Command", 
-                            "previous_url": "/docs/show-tables-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/120-use-command.md", 
-                            "title": "USE Command", 
-                            "url": "/docs/use-command/"
+                            "previous_title": "SHOW TABLES", 
+                            "previous_url": "/docs/show-tables/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/120-use.md", 
+                            "title": "USE", 
+                            "url": "/docs/use/"
                         }
                     ], 
                     "next_title": "Supported SQL Commands", 
@@ -7321,8 +7951,8 @@
                     "next_title": "CASE", 
                     "next_url": "/docs/case/", 
                     "parent": "SQL Reference", 
-                    "previous_title": "USE Command", 
-                    "previous_url": "/docs/use-command/", 
+                    "previous_title": "USE", 
+                    "previous_url": "/docs/use/", 
                     "relative_path": "_docs/sql-reference/075-sql-conditional-expressions.md", 
                     "title": "SQL Conditional Expressions", 
                     "url": "/docs/sql-conditional-expressions/"
@@ -7856,8 +8486,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "ALTER SESSION Command", 
-            "next_url": "/docs/alter-session-command/", 
+            "next_title": "ALTER SESSION", 
+            "next_url": "/docs/alter-session/", 
             "parent": "SQL Commands", 
             "previous_title": "SQL Commands", 
             "previous_url": "/docs/sql-commands/", 
@@ -8187,7 +8817,7 @@
             "title": "Tutorials Introduction", 
             "url": "/docs/tutorials-introduction/"
         }, 
-        "USE Command": {
+        "USE": {
             "breadcrumbs": [
                 {
                     "title": "SQL Commands", 
@@ -8202,11 +8832,11 @@
             "next_title": "SQL Conditional Expressions", 
             "next_url": "/docs/sql-conditional-expressions/", 
             "parent": "SQL Commands", 
-            "previous_title": "SHOW TABLES Command", 
-            "previous_url": "/docs/show-tables-command/", 
-            "relative_path": "_docs/sql-reference/sql-commands/120-use-command.md", 
-            "title": "USE Command", 
-            "url": "/docs/use-command/"
+            "previous_title": "SHOW TABLES", 
+            "previous_url": "/docs/show-tables/", 
+            "relative_path": "_docs/sql-reference/sql-commands/120-use.md", 
+            "title": "USE", 
+            "url": "/docs/use/"
         }, 
         "Useful Research": {
             "breadcrumbs": [
@@ -11272,8 +11902,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "ALTER SESSION Command", 
-                            "next_url": "/docs/alter-session-command/", 
+                            "next_title": "ALTER SESSION", 
+                            "next_url": "/docs/alter-session/", 
                             "parent": "SQL Commands", 
                             "previous_title": "SQL Commands", 
                             "previous_url": "/docs/sql-commands/", 
@@ -11293,14 +11923,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "ALTER SYSTEM Command", 
-                            "next_url": "/docs/alter-system-command/", 
+                            "next_title": "ALTER SYSTEM", 
+                            "next_url": "/docs/alter-system/", 
                             "parent": "SQL Commands", 
                             "previous_title": "Supported SQL Commands", 
                             "previous_url": "/docs/supported-sql-commands/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/010-alter-session-command.md", 
-                            "title": "ALTER SESSION Command", 
-                            "url": "/docs/alter-session-command/"
+                            "relative_path": "_docs/sql-reference/sql-commands/010-alter-session.md", 
+                            "title": "ALTER SESSION", 
+                            "url": "/docs/alter-session/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -11314,14 +11944,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "CREATE TABLE AS (CTAS) Command", 
-                            "next_url": "/docs/create-table-as-ctas-command/", 
+                            "next_title": "CREATE TABLE AS (CTAS)", 
+                            "next_url": "/docs/create-table-as-ctas/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "ALTER SESSION Command", 
-                            "previous_url": "/docs/alter-session-command/", 
+                            "previous_title": "ALTER SESSION", 
+                            "previous_url": "/docs/alter-session/", 
                             "relative_path": "_docs/sql-reference/sql-commands/020-alter-system.md", 
-                            "title": "ALTER SYSTEM Command", 
-                            "url": "/docs/alter-system-command/"
+                            "title": "ALTER SYSTEM", 
+                            "url": "/docs/alter-system/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -11335,14 +11965,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "CREATE VIEW Command", 
-                            "next_url": "/docs/create-view-command/", 
+                            "next_title": "CREATE VIEW", 
+                            "next_url": "/docs/create-view/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "ALTER SYSTEM Command", 
-                            "previous_url": "/docs/alter-system-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as-command.md", 
-                            "title": "CREATE TABLE AS (CTAS) Command", 
-                            "url": "/docs/create-table-as-ctas-command/"
+                            "previous_title": "ALTER SYSTEM", 
+                            "previous_url": "/docs/alter-system/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/030-create-table-as.md", 
+                            "title": "CREATE TABLE AS (CTAS)", 
+                            "url": "/docs/create-table-as-ctas/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -11356,14 +11986,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "DESCRIBE Command", 
-                            "next_url": "/docs/describe-command/", 
+                            "next_title": "DROP VIEW", 
+                            "next_url": "/docs/drop-view/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "CREATE TABLE AS (CTAS) Command", 
-                            "previous_url": "/docs/create-table-as-ctas-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/050-create-view-command.md", 
-                            "title": "CREATE VIEW Command", 
-                            "url": "/docs/create-view-command/"
+                            "previous_title": "CREATE TABLE AS (CTAS)", 
+                            "previous_url": "/docs/create-table-as-ctas/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/050-create-view.md", 
+                            "title": "CREATE VIEW", 
+                            "url": "/docs/create-view/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -11377,14 +12007,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "EXPLAIN Commands", 
-                            "next_url": "/docs/explain-commands/", 
+                            "next_title": "DESCRIBE", 
+                            "next_url": "/docs/describe/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "CREATE VIEW Command", 
-                            "previous_url": "/docs/create-view-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/060-describe-command.md", 
-                            "title": "DESCRIBE Command", 
-                            "url": "/docs/describe-command/"
+                            "previous_title": "CREATE VIEW", 
+                            "previous_url": "/docs/create-view/", 
+                            "relative_path": "_docs/sql-reference/sql-commands/055-drop-view.md", 
+                            "title": "DROP VIEW", 
+                            "url": "/docs/drop-view/"
                         }, 
                         {
                             "breadcrumbs": [
@@ -11398,14 +12028,14 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "SELECT Statements", 
-                            "next_url": "/docs/select-statements/", 
+                            "next_title": "EXPLAIN", 
+                            "next_url": "/docs/explain/", 
                             "parent": "SQL Commands", 
-                            "previous_title": "DESCRIBE Command", 
-                            "previous_url": "/docs/describe-command/", 
-                            "relative_path": "_docs/sql-reference/sql-commands/070-explain-commands.md", 
-                            "title": "EXPLAIN Commands", 
-                            "url": "/

<TRUNCATED>

[06/31] drill git commit: stale avro references

Posted by ts...@apache.org.
stale avro references


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/99af7ad3
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/99af7ad3
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/99af7ad3

Branch: refs/heads/gh-pages
Commit: 99af7ad3e5924c32ad6620bd778b39ebb3d2d480
Parents: 313dbd6
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 12:57:06 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 12:57:06 2015 -0700

----------------------------------------------------------------------
 .../010-architecture-introduction.md            | 45 +-------------------
 .../architectural-highlights/010-flexibility.md |  2 +-
 .../120-configuring-the-drill-shell.md          |  2 +-
 .../040-parquet-format.md                       |  2 +-
 .../020-apache-drill-contribution-ideas.md      |  1 -
 _docs/getting-started/010-drill-introduction.md |  4 +-
 6 files changed, 7 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/99af7ad3/_docs/architecture/010-architecture-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/010-architecture-introduction.md b/_docs/architecture/010-architecture-introduction.md
index 67fa416..45fad19 100644
--- a/_docs/architecture/010-architecture-introduction.md
+++ b/_docs/architecture/010-architecture-introduction.md
@@ -48,51 +48,8 @@ The flow of a Drill query typically involves the following steps:
 
 You can access Drill through the following interfaces:
 
-  * [Drill shell (SQLLine)]({{ site.baseurl }}/docs/install-drill)
+  * [Drill shell]({{ site.baseurl }}/docs/install-drill)
   * [Drill Web UI]({{ site.baseurl }}/docs/monitoring-and-canceling-queries-in-the-drill-web-ui)
   * [ODBC/JDBC]({{ site.baseurl }}/docs/odbc-jdbc-interfaces/#using-odbc-to-access-apache-drill-from-bi-tools) 
   * C++ API
 
-### **_Dynamic schema discovery_**
-
-Drill does not require schema or type specification for data in order to start
-the query execution process. Drill starts data processing in record-batches
-and discovers the schema during processing. Self-describing data formats such
-as Parquet, JSON, AVRO, and NoSQL databases have schema specified as part of
-the data itself, which Drill leverages dynamically at query time. Because
-schema can change over the course of a Drill query, all Drill operators are
-designed to reconfigure themselves when schemas change.
-
-### **_Flexible data model_**
-
-Drill allows access to nested data attributes, just like SQL columns, and
-provides intuitive extensions to easily operate on them. From an architectural
-point of view, Drill provides a flexible hierarchical columnar data model that
-can represent complex, highly dynamic and evolving data models. Drill allows
-for efficient processing of these models without the need to flatten or
-materialize them at design time or at execution time. Relational data in Drill
-is treated as a special or simplified case of complex/multi-structured data.
-
-### **_De-centralized metadata_**
-
-Drill does not have a centralized metadata requirement. You do not need to
-create and manage tables and views in a metadata repository, or rely on a
-database administrator group for such a function. Drill metadata is derived
-from the storage plugins that correspond to data sources. Storage plugins
-provide a spectrum of metadata ranging from full metadata (Hive), partial
-metadata (HBase), or no central metadata (files). De-centralized metadata
-means that Drill is NOT tied to a single Hive repository. You can query
-multiple Hive repositories at once and then combine the data with information
-from HBase tables or with a file in a distributed file system. You can also
-use SQL DDL syntax to create metadata within Drill, which gets organized just
-like a traditional database. Drill metadata is accessible through the ANSI
-standard INFORMATION_SCHEMA database.
-
-### **_Extensibility_**
-
-Drill provides an extensible architecture at all layers, including the storage
-plugin, query, query optimization/execution, and client API layers. You can
-customize any layer for the specific needs of an organization or you can
-extend the layer to a broader array of use cases. Drill provides a built in
-classpath scanning and plugin concept to add additional storage plugins,
-functions, and operators with minimal configuration.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/99af7ad3/_docs/architecture/architectural-highlights/010-flexibility.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/architectural-highlights/010-flexibility.md b/_docs/architecture/architectural-highlights/010-flexibility.md
index 58e6107..a04d4ea 100644
--- a/_docs/architecture/architectural-highlights/010-flexibility.md
+++ b/_docs/architecture/architectural-highlights/010-flexibility.md
@@ -15,7 +15,7 @@ The following features contribute to Drill's flexible architecture:
 Drill does not require schema or type specification for the data in order to
 start the query execution process. Instead, Drill starts processing the data
 in units called record-batches and discovers the schema on the fly during
-processing. Self-describing data formats such as Parquet, JSON, AVRO, and
+processing. Self-describing data formats such as Parquet, JSON, Avro, and
 NoSQL databases have schema specified as part of the data itself, which Drill
 leverages dynamically at query time. Schema can change over the course of a
 Drill query, so all of the Drill operators are designed to reconfigure

http://git-wip-us.apache.org/repos/asf/drill/blob/99af7ad3/_docs/configure-drill/120-configuring-the-drill-shell.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/120-configuring-the-drill-shell.md b/_docs/configure-drill/120-configuring-the-drill-shell.md
index b9ff103..d7c474a 100644
--- a/_docs/configure-drill/120-configuring-the-drill-shell.md
+++ b/_docs/configure-drill/120-configuring-the-drill-shell.md
@@ -2,7 +2,7 @@
 title: "Configuring the Drill Shell"
 parent: "Configure Drill"
 ---
-At the Drill shell command prompt, typing "help" lists the configuration and other options you can set to manage shell functionality. Apache Drill 1.0 formats the resultset output tables for readability if possible. In this release, columns having 70 characters or more cannot be formatted. This document formats all output for readability and example purposes.
+After [starting the Drill shell]({{site.baseurl}}/docs/starting-drill-on-linux-and-mac-os-x/), you can type queries on the shell command line. At the Drill shell command prompt, typing "help" lists the configuration and other options you can set to manage shell functionality. Apache Drill 1.0 formats the resultset output tables for readability if possible. In this release, columns having 70 characters or more cannot be formatted. This document formats all output for readability and example purposes.
 
 Formatting tables takes time, which you might notice if running a huge query using the default `outputFormat` setting, which is `table` of the Drill shell. You can set another, more performant table formatting such as `csv`, as shown in the [examples]({{site.baseurl}}/docs/configuring-the-drill-shell/#examples-of-configuring-the-drill-shell). 
 

http://git-wip-us.apache.org/repos/asf/drill/blob/99af7ad3/_docs/data-sources-and-file-formats/040-parquet-format.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/040-parquet-format.md b/_docs/data-sources-and-file-formats/040-parquet-format.md
index cd14359..ca8b164 100644
--- a/_docs/data-sources-and-file-formats/040-parquet-format.md
+++ b/_docs/data-sources-and-file-formats/040-parquet-format.md
@@ -158,4 +158,4 @@ Parquet supports the following data description languages:
 * Apache Thrift
 * Google Protocol Buffers 
 
-Implement custom storage plugins, such as an Avro plugin, to create Parquet readers/writers for these formats. 
\ No newline at end of file
+Implement custom storage plugins to create Parquet readers/writers for formats such as Thrift. 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/99af7ad3/_docs/developer-information/contribute-to-drill/020-apache-drill-contribution-ideas.md
----------------------------------------------------------------------
diff --git a/_docs/developer-information/contribute-to-drill/020-apache-drill-contribution-ideas.md b/_docs/developer-information/contribute-to-drill/020-apache-drill-contribution-ideas.md
index c3f5a87..a4ef0b8 100644
--- a/_docs/developer-information/contribute-to-drill/020-apache-drill-contribution-ideas.md
+++ b/_docs/developer-information/contribute-to-drill/020-apache-drill-contribution-ideas.md
@@ -54,7 +54,6 @@ Currently Drill supports text, JSON and Parquet file formats natively when
 interacting with file system. More readers/writers can be introduced by
 implementing custom storage plugins. Example formats are.
 
-  * AVRO
   * Sequence
   * RC
   * ORC

http://git-wip-us.apache.org/repos/asf/drill/blob/99af7ad3/_docs/getting-started/010-drill-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/getting-started/010-drill-introduction.md b/_docs/getting-started/010-drill-introduction.md
index 100d9da..3645cfb 100644
--- a/_docs/getting-started/010-drill-introduction.md
+++ b/_docs/getting-started/010-drill-introduction.md
@@ -16,7 +16,9 @@ Apache Drill 1.0 offers the following new features:
 * [Query audit logging]({{site.baseurl}}/docs/getting-query-information/) for getting the query history on a Drillbit.
 * Improved connection handling.
 * New Errors tab in the Query Profiles UI that facilitates troubleshooting and distributed storing of profiles.
-* Support for new storage plugin format: [Avro](http://avro.apache.org/docs/current/spec.html)
+* Support for a new storage plugin input format: [Avro](http://avro.apache.org/docs/current/spec.html)
+
+In this release, Drill disables the DECIMAL data type, including casting to DECIMAL and reading DECIMAL types from Parquet and Hive. To enable the DECIMAL type, set the `planner.enable_decimal_data_type` system option to `true`.
 
 Key features of Apache Drill are:
 


[13/31] drill git commit: Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages

Posted by ts...@apache.org.
Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/5a7f700e
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/5a7f700e
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/5a7f700e

Branch: refs/heads/gh-pages
Commit: 5a7f700e88d5ae72c4951fee0af78b79023897d2
Parents: 3ae74f0 e3b2c1a
Author: Tomer Shiran <ts...@gmail.com>
Authored: Sun May 17 21:04:58 2015 -0700
Committer: Tomer Shiran <ts...@gmail.com>
Committed: Sun May 17 21:04:58 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 431 +++++++++----------
 _docs/072-performance-tuning.md                 |   5 +
 _docs/075-getting-query-information.md          |   5 +-
 .../010-architecture-introduction.md            |   2 +-
 .../030-architectural-highlights.md             |   5 -
 _docs/architecture/030-performance.md           |  55 +++
 .../architectural-highlights/010-flexibility.md |  84 ----
 .../architectural-highlights/020-performance.md |  55 ---
 _docs/archived-pages/030-partition-pruning.md   |  75 ++++
 ...guring-a-multitenant-cluster-introduction.md |   4 +-
 .../050-configuring-multitenant-resources.md    |   6 +-
 .../060-configuring-a-shared-drillbit.md        |  32 +-
 _docs/configure-drill/110-partition-pruning.md  |  75 ----
 .../120-configuring-the-drill-shell.md          |  82 ++++
 .../010-configuration-options-introduction.md   |   8 +-
 .../030-planning-and-exececution-options.md     |  40 +-
 .../035-plugin-configuration-introduction.md    |  10 +-
 .../080-drill-default-input-format.md           |   8 +-
 .../090-mongodb-plugin-for-apache-drill.md      |  50 ++-
 ...ata-sources-and-file-formats-introduction.md |   5 +-
 .../040-parquet-format.md                       |   2 +-
 .../020-apache-drill-contribution-ideas.md      |   1 -
 _docs/getting-started/010-drill-introduction.md |  47 +-
 _docs/getting-started/020-why-drill.md          |  36 +-
 .../030-starting-drill-on-linux-and-mac-os-x.md |   2 +-
 .../010-performance-tuning-introduction.md      |  17 +
 .../005-querying-a-file-system-introduction.md  |   1 +
 .../sql-functions/020-data-type-conversion.md   |   8 +-
 _docs/tutorials/020-drill-in-10-minutes.md      |   4 +-
 .../030-analyzing-the-yelp-academic-dataset.md  |   4 +-
 .../050-analyzing-highly-dynamic-datasets.md    |   4 +-
 .../020-getting-to-know-the-drill-sandbox.md    |   2 +-
 .../030-lesson-1-learn-about-the-data-set.md    |  10 +-
 33 files changed, 557 insertions(+), 618 deletions(-)
----------------------------------------------------------------------



[20/31] drill git commit: fix typo in command

Posted by ts...@apache.org.
fix typo in command


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/46617fc8
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/46617fc8
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/46617fc8

Branch: refs/heads/gh-pages
Commit: 46617fc8004bb2f0b19c9380f34438f719a9086c
Parents: 3468b99
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 07:44:53 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 07:44:53 2015 -0700

----------------------------------------------------------------------
 .../050-starting-drill-on-windows.md                               | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/46617fc8/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
index 2cabb68..2a4a9bd 100644
--- a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
@@ -14,7 +14,7 @@ At this point, you can [submit queries]({{ site.baseurl }}/docs/drill-in-10-minu
 
 You can use the schema option in the **sqlline** command to specify a storage plugin. Specifying the storage plugin when you start up eliminates the need to specify the storage plugin in the query: For example, this command specifies the `dfs` storage plugin.
 
-    c:\bin/sqlline –u "jdbc:drill:schema=dfs;zk=local"
+    c:\bin\sqlline\sqlline.bat –u "jdbc:drill:schema=dfs;zk=local"
 
 ## Exiting the Drill Shell
 


[22/31] drill git commit: add BB's clause pages, renamed sql command pages

Posted by ts...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/085-select-offset.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/085-select-offset.md b/_docs/sql-reference/sql-commands/085-select-offset.md
new file mode 100644
index 0000000..9e97051
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/085-select-offset.md
@@ -0,0 +1,29 @@
+---
+title: "SELECT OFFSET"
+parent: "SQL Commands"
+---
+The OFFSET clause provides a way to skip a specified number of first rows in a result set before starting to return any rows.
+
+## Syntax
+The OFFSET clause supports the following syntax:
+
+       [ OFFSET start { ROW | ROWS } ]
+
+Omitting the OFFSET clause is equivalent to OFFSET 0, which returns the result set starting with the first row.
+
+## Parameters
+*start*  
+
+Specifies the number of rows Drill should skip before returning the result set. 
+
+## Usage Notes  
+   * The OFFSET number must be a positive integer. If the number is larger than the number of rows in the underlying result set, no rows are returned.
+   * You can use the OFFSET clause in conjunction with the LIMIT and ORDER BY clauses.
+   * When used with the LIMIT option, OFFSET rows are skipped before starting to count the LIMIT rows that are returned. If the LIMIT option is not used, the number of rows in the result set is reduced by the number of rows that are skipped.
+   * The rows skipped by an OFFSET clause still have to be scanned, so it might be inefficient to use a large OFFSET value.
+
+## Examples
+The following example query returns the result set from row 101 and on, skipping the first 100 rows of the table:
+
+       SELECT * FROM dfs.logs OFFSET 100 ROWS; 
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/086-select-order-by.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/086-select-order-by.md b/_docs/sql-reference/sql-commands/086-select-order-by.md
new file mode 100644
index 0000000..794d029
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/086-select-order-by.md
@@ -0,0 +1,71 @@
+---
+title: "SELECT ORDER BY"
+parent: "SQL Commands"
+---
+The ORDER BY clause sorts the result set of a query.
+
+
+
+## Syntax
+The ORDER BY clause supports the following syntax:
+
+       [ ORDER BY expression
+       [ ASC | DESC ]
+       [ NULLS FIRST | NULLS LAST ] ]
+
+  
+
+## Parameters  
+*expression*  
+
+Defines the sort order of the query result set, typically by specifying one or more columns in the select list.  
+
+You can also specify:  
+
+   * Columns that are not in the select list 
+   * Expressions formed from one or more columns that exist in the tables referenced by the query
+   * Ordinal numbers that represent the position of select list entries (or the position of columns in the table if no select list exists)
+   * Aliases that define select list entries
+   
+When the ORDER BY clause contains multiple expressions, the result set is sorted according to the first expression, then the second expression is applied to rows that have matching values from the first expression, and so on.
+
+ASC  
+Specifies that the results should be returned in ascending order. If the order is not specified, ASC is the default.
+
+DESC  
+Specifies that the results should be returned in descending order. 
+
+NULLS FIRST  
+Specifies that NULL values should be returned before non-NULL values.  
+
+NULLS LAST  
+Specifies that NULL values should be returned after non-NULL values.
+
+## Usage Notes
+   * NULL values are considered "higher" than all other values. With default ascending sort order, NULL values sort at the end.  
+   * When a query does not contain an ORDER BY clause, the system returns result sets with no predictable ordering of the rows. The same query executed twice might return the result set in a different order.  
+   * In any parallel system, when ORDER BY does not produce a unique ordering, the order of the rows is non-deterministic. That is, if the ORDER BY expression produces duplicate values, the return order of those rows may vary from other systems or from one run of the system to the next.
+
+## Examples
+The following example query returns sales totals for each month in descending order, listing the highest sales month to the lowest sales month:
+
+       0: jdbc:drill:> select `month`, sum(order_total)
+       from orders group by `month` order by 2 desc;
+       +------------+------------+
+       | month | EXPR$1 |
+       +------------+------------+
+       | June | 950481 |
+       | May | 947796 |
+       | March | 836809 |
+       | April | 807291 |
+       | July | 757395 |
+       | October | 676236 |
+       | August | 572269 |
+       | February | 532901 |
+       | September | 373100 |
+       | January | 346536 |
+       +------------+------------+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/087-select-union.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/087-select-union.md b/_docs/sql-reference/sql-commands/087-select-union.md
new file mode 100644
index 0000000..6d5af65
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/087-select-union.md
@@ -0,0 +1,42 @@
+---
+title: "SELECT UNION"
+parent: "SQL Commands"
+---
+The UNION set operator returns all rows in the result sets of two separate query expressions. For example, if two employee tables exist, you can use the UNION set operator to merge the two tables and build a complete list of all the employees. Drill supports UNION ALL only. Drill does not support DISTINCT.
+
+
+## Syntax
+The UNION set operator supports the following syntax:
+
+       query
+       { UNION [ ALL ] }
+       query
+  
+
+## Parameters  
+*query*  
+
+Any SELECT query that Drill supports. See SELECT.
+
+## Usage Notes
+   * The two SELECT query expressions that represent the direct operands of the UNION must produce the same number of columns. Corresponding columns must contain compatible data types. See Supported Data Types.  
+   * Multiple UNION operators in the same SELECT statement are evaluated left to right, unless otherwise indicated by parentheses.  
+   * You cannot use * in UNION ALL for schemaless data.
+
+## Examples
+The following example uses the UNION ALL set operator to combine click activity data before and after a marketing campaign. The data in the example exists in the `dfs.clicks` workspace.
+ 
+       0: jdbc:drill:> select t.trans_id transaction, t.user_info.cust_id customer from `clicks/clicks.campaign.json` t 
+       union all 
+       select u.trans_id, u.user_info.cust_id  from `clicks/clicks.json` u limit 5;
+       +-------------+------------+
+       | transaction |  customer  |
+       +-------------+------------+
+       | 35232       | 18520      |
+       | 31995       | 17182      |
+       | 35760       | 18228      |
+       | 37090       | 17015      |
+       | 37838       | 18737      |
+       +-------------+------------+
+
+This UNION ALL query returns rows that exist in two files (and includes any duplicate rows from those files): `clicks.campaign.json` and `clicks.json`
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/088-select-where.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/088-select-where.md b/_docs/sql-reference/sql-commands/088-select-where.md
new file mode 100644
index 0000000..5d600fa
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/088-select-where.md
@@ -0,0 +1,59 @@
+---
+title: "SELECT WHERE"
+parent: "SQL Commands"
+---
+The WHERE clause selects rows based on a boolean expression. Only rows for which the expression evaluates to TRUE are returned in the result.
+
+## Syntax
+The WHERE clause supports the following syntax:
+
+       [ WHERE boolean_expression ]  
+
+## Expression  
+A boolean expression can include one or more of the following operators:  
+
+  * AND
+  * OR
+  * NOT
+  * IS NULL
+  * IS NOT NULL
+  * LIKE 
+  * BETWEEN
+  * IN
+  * EXISTS
+  * Comparison operators
+  * Quantified comparison operators
+
+
+## Examples
+The following query compares order totals where the states are California and New York:  
+
+       0: jdbc:drill:> select o1.cust_id, sum(o1.order_total) as ny_sales,
+       (select sum(o2.order_total) from hive.orders o2
+       where o1.cust_id=o2.cust_id and state='ca') as ca_sales
+       from hive.orders o1 where o1.state='ny' group by o1.cust_id
+       order by cust_id limit 20;
+       +------------+------------+------------+
+       |  cust_id   |  ny_sales  |  ca_sales  |
+       +------------+------------+------------+
+       | 1001       | 72         | 47         |
+       | 1002       | 108        | 198        |
+       | 1003       | 83         | null       |
+       | 1004       | 86         | 210        |
+       | 1005       | 168        | 153        |
+       | 1006       | 29         | 326        |
+       | 1008       | 105        | 168        |
+       | 1009       | 443        | 127        |
+       | 1010       | 75         | 18         |
+       | 1012       | 110        | null       |
+       | 1013       | 19         | null       |
+       | 1014       | 106        | 162        |
+       | 1015       | 220        | 153        |
+       | 1016       | 85         | 159        |
+       | 1017       | 82         | 56         |
+       | 1019       | 37         | 196        |
+       | 1020       | 193        | 165        |
+       | 1022       | 124        | null       |
+       | 1023       | 166        | 149        |
+       | 1024       | 233        | null       |
+       +------------+------------+------------+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/089-select-with.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/089-select-with.md b/_docs/sql-reference/sql-commands/089-select-with.md
new file mode 100644
index 0000000..4253354
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/089-select-with.md
@@ -0,0 +1,95 @@
+---
+title: "SELECT WITH"
+parent: "SQL Commands"
+---
+The WITH clause is an optional clause used to contain one or more common table
+expressions (CTE) where each CTE defines a temporary table that exists for the
+duration of the query. Each subquery in the WITH clause specifies a table
+name, an optional list of column names, and a SELECT statement.
+
+## Syntax
+
+The WITH clause supports the following syntax:
+
+    [ WITH with_subquery [, ...] ]
+    where with_subquery is:
+    with_subquery_table_name [ ( column_name [, ...] ) ] AS ( query ) 
+
+## Parameters
+
+_with_subquery_table_name_
+
+A unique name for a temporary table that defines the results of a WITH clause
+subquery. You cannot use duplicate names within a single WITH clause. You must
+give each subquery a table name that can be referenced in the FROM clause.
+
+_column_name_
+
+An optional list of output column names for the WITH clause subquery,
+separated by commas. The number of column names specified must be equal to or
+less than the number of columns defined by the subquery.
+
+_query_
+
+Any SELECT query that Drill supports. See
+[SELECT]({{ site.baseurl }}/docs/SELECT+Statements).
+
+## Usage Notes
+
+Use the WITH clause to efficiently define temporary tables that Drill can
+access throughout the execution of a single query. The WITH clause is
+typically a simpler alternative to using subqueries in the main body of the
+SELECT statement. In some cases, Drill can evaluate a WITH subquery once and
+reuse the results for query optimization.
+
+You can use a WITH clause in the following SQL statements:
+
+  * SELECT (including subqueries within SELECT statements)
+
+  * CREATE TABLE AS
+
+  * CREATE VIEW
+
+  * EXPLAIN
+
+You can reference the temporary tables in the FROM clause of the query. If the
+FROM clause does not reference any tables defined by the WITH clause, Drill
+ignores the WITH clause and executes the query as normal.
+
+Drill can only reference a table defined by a WITH clause subquery in the
+scope of the SELECT query that the WITH clause begins. For example, you can
+reference such a table in the FROM clause of a subquery in the SELECT list,
+WHERE clause, or HAVING clause. You cannot use a WITH clause in a subquery and
+reference its table in the FROM clause of the main query or another subquery.
+
+You cannot specify another WITH clause inside a WITH clause subquery.
+
+For example, the definition of table t1 cannot contain another WITH clause that
+defines a table t2.
+
+## Example
+
+The following example shows the WITH clause used to create a WITH query named
+`emp_data` that selects all of the rows from the `employee.json` file. The
+main query selects the `full_name, position_title, salary`, and `hire_date`
+rows from the `emp_data` temporary table (created from the WITH subquery) and
+orders the results by the hire date. The `emp_data` table only exists for the
+duration of the query.
+
+**Note:** The `employee.json` file is included with the Drill installation. It is located in the `cp.default` workspace, which is configured by default. 
+
+    0: jdbc:drill:zk=local> with emp_data as (select * from cp.`employee.json`) select full_name, position_title, salary, hire_date from emp_data order by hire_date limit 10;
+    +------------------+-------------------------+------------+-----------------------+
+    | full_name        | position_title          |   salary   | hire_date             |
+    +------------------+-------------------------+------------+-----------------------+
+    | Bunny McCown     | Store Assistant Manager | 8000.0     | 1993-05-01 00:00:00.0 |
+    | Danielle Johnson | Store Assistant Manager | 8000.0     | 1993-05-01 00:00:00.0 |
+    | Dick Brummer     | Store Assistant Manager | 7900.0     | 1993-05-01 00:00:00.0 |
+    | Gregory Whiting  | Store Assistant Manager | 10000.0    | 1993-05-01 00:00:00.0 |
+    | Juanita Sharp    | HQ Human Resources      | 6700.0     | 1994-01-01 00:00:00.0 |
+    | Sheri Nowmer     | President               | 80000.0    | 1994-12-01 00:00:00.0 |
+    | Rebecca Kanagaki | VP Human Resources      | 15000.0    | 1994-12-01 00:00:00.0 |
+    | Shauna Wyro      | Store Manager           | 15000.0    | 1994-12-01 00:00:00.0 |
+    | Roberta Damstra  | VP Information Systems  | 25000.0    | 1994-12-01 00:00:00.0 |
+    | Pedro Castillo   | VP Country Manager      | 35000.0    | 1994-12-01 00:00:00.0 |
+    +------------------+-------------------------+------------+-----------------------+
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md b/_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md
index 0f227ae..a7d07d2 100644
--- a/_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md
+++ b/_docs/sql-reference/sql-commands/090-show-databases-and-show-schemas.md
@@ -1,5 +1,5 @@
 ---
-title: "SHOW DATABASES and SHOW SCHEMAS Command"
+title: "SHOW DATABASES and SHOW SCHEMAS"
 parent: "SQL Commands"
 ---
 The SHOW DATABASES and SHOW SCHEMAS commands generate a list of available Drill schemas that you can query.

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/100-show-files.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/100-show-files.md b/_docs/sql-reference/sql-commands/100-show-files.md
index 9651add..f115c21 100644
--- a/_docs/sql-reference/sql-commands/100-show-files.md
+++ b/_docs/sql-reference/sql-commands/100-show-files.md
@@ -1,5 +1,5 @@
 ---
-title: "SHOW FILES Command"
+title: "SHOW FILES"
 parent: "SQL Commands"
 ---
 The SHOW FILES command provides a quick report of the file systems that are

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/110-show-tables-command.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/110-show-tables-command.md b/_docs/sql-reference/sql-commands/110-show-tables-command.md
deleted file mode 100644
index 560bde4..0000000
--- a/_docs/sql-reference/sql-commands/110-show-tables-command.md
+++ /dev/null
@@ -1,136 +0,0 @@
----
-title: "SHOW TABLES Command"
-parent: "SQL Commands"
----
-The SHOW TABLES command returns a list of views created within a schema. It
-also returns the tables that exist in Hive, HBase, and MapR-DB when you have
-these data sources configured as storage plugin instances. See[ Storage Plugin
-Registration]({{ site.baseurl }}/docs/storage-plugin-registration).
-
-## Syntax
-
-The SHOW TABLES command supports the following syntax:
-
-    SHOW TABLES;
-
-## Usage Notes
-
-First issue the USE command to identify the schema for which you want to view
-tables or views. For example, the following USE statement tells Drill that you
-only want information from the `dfs.myviews` schema:
-
-    USE dfs.myviews;
-
-In this example, “`myviews`” is a workspace created withing an instance of the
-`dfs` storage plugin.
-
-When you use a particular schema and then issue the SHOW TABLES command, Drill
-returns the tables and views within that schema.
-
-#### Limitations
-
-  * You can create and query tables within the file system, however Drill does not return these tables when you issue the SHOW TABLES command. You can issue the [SHOW FILES ]({{ site.baseurl }}/docs/show-files-command)command to see a list of all files, tables, and views, including those created in Drill. 
-
-  * You cannot create Hive, HBase, or MapR-DB tables in Drill. 
-
-## Examples
-
-The following examples demonstrate the steps that you can follow when you want
-to issue the SHOW TABLES command on the file system, Hive, and HBase.  
-  
-Complete the following steps to see views that exist in a file system and
-tables that exist in Hive and HBase data sources:
-
-  1. Issue the SHOW SCHEMAS command to see a list of available schemas.
-
-        0: jdbc:drill:zk=drilldemo:5181> show schemas;
-        +-------------+
-        | SCHEMA_NAME |
-        +-------------+
-        | hive.default |
-        | dfs.reviews |
-        | dfs.flatten |
-        | dfs.default |
-        | dfs.root  |
-        | dfs.logs  |
-        | dfs.myviews   |
-        | dfs.clicks  |
-        | dfs.tmp   |
-        | sys       |
-        | hbase     |
-        | INFORMATION_SCHEMA |
-        | s3.twitter  |
-        | s3.reviews  |
-        | s3.default  |
-        +-------------+
-        15 rows selected (0.072 seconds)
-
-  2. Issue the USE command to switch to a particular schema. When you use a particular schema, Drill searches or queries within that schema only. 
-
-        0: jdbc:drill:zk=drilldemo:5181> use dfs.myviews;
-        +------------+------------+
-        |   ok  |  summary   |
-        +------------+------------+
-        | true      | Default schema changed to 'dfs.myviews' |
-        +------------+------------+
-        1 row selected (0.025 seconds)
-
-  3. Issue the SHOW TABLES command to see the views or tables that exist within workspace.
-
-        0: jdbc:drill:zk=drilldemo:5181> show tables;
-        +--------------+------------+
-        | TABLE_SCHEMA | TABLE_NAME |
-        +--------------+------------+
-        | dfs.myviews   | logs_vw   |
-        | dfs.myviews   | customers_vw |
-        | dfs.myviews   | s3_review_vw |
-        | dfs.myviews   | clicks_vw  |
-        | dfs.myviews   | nestedclickview |
-        | dfs.myviews   | s3_user_vw |
-        | dfs.myviews   | s3_bus_vw  |
-        +--------------+------------+
-        7 rows selected (0.499 seconds)
-        0: jdbc:drill:zk=drilldemo:5181>
-
-  4. Switch to the Hive schema and issue the SHOW TABLES command to see the Hive tables that exist.
-
-        0: jdbc:drill:zk=drilldemo:5181> use hive;
-        +------------+------------+
-        |   ok  |  summary   |
-        +------------+------------+
-        | true      | Default schema changed to 'hive' |
-        +------------+------------+
-        1 row selected (0.043 seconds)
-         
-        0: jdbc:drill:zk=drilldemo:5181> show tables;
-        +--------------+------------+
-        | TABLE_SCHEMA | TABLE_NAME |
-        +--------------+------------+
-        | hive.default | orders     |
-        | hive.default | products   |
-        +--------------+------------+
-        2 rows selected (0.552 seconds)
-
-  5. Switch to the HBase schema and issue the SHOW TABLES command to see the HBase tables that exist within the schema.
-
-        0: jdbc:drill:zk=drilldemo:5181> use hbase;
-        +------------+------------+
-        |   ok  |  summary   |
-        +------------+------------+
-        | true      | Default schema changed to 'hbase' |
-        +------------+------------+
-        1 row selected (0.043 seconds)
-         
-         
-        0: jdbc:drill:zk=drilldemo:5181> show tables;
-        +--------------+------------+
-        | TABLE_SCHEMA | TABLE_NAME |
-        +--------------+------------+
-        | hbase     | customers  |
-        +--------------+------------+
-        1 row selected (0.412 seconds)
-
-  
-
-  
-

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/110-show-tables.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/110-show-tables.md b/_docs/sql-reference/sql-commands/110-show-tables.md
new file mode 100644
index 0000000..40621cf
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/110-show-tables.md
@@ -0,0 +1,136 @@
+---
+title: "SHOW TABLES"
+parent: "SQL Commands"
+---
+The SHOW TABLES command returns a list of views created within a schema. It
+also returns the tables that exist in Hive, HBase, and MapR-DB when you have
+these data sources configured as storage plugin instances. See [Storage Plugin
+Registration]({{ site.baseurl }}/docs/storage-plugin-registration).
+
+## Syntax
+
+The SHOW TABLES command supports the following syntax:
+
+    SHOW TABLES;
+
+## Usage Notes
+
+First issue the USE command to identify the schema for which you want to view
+tables or views. For example, the following USE statement tells Drill that you
+only want information from the `dfs.myviews` schema:
+
+    USE dfs.myviews;
+
+In this example, “`myviews`” is a workspace created within an instance of the
+`dfs` storage plugin.
+
+When you use a particular schema and then issue the SHOW TABLES command, Drill
+returns the tables and views within that schema.
+
+#### Limitations
+
+  * You can create and query tables within the file system, however Drill does not return these tables when you issue the SHOW TABLES command. You can issue the [SHOW FILES]({{ site.baseurl }}/docs/show-files-command) command to see a list of all files, tables, and views, including those created in Drill. 
+
+  * You cannot create Hive, HBase, or MapR-DB tables in Drill. 
+
+## Examples
+
+The following examples demonstrate the steps that you can follow when you want
+to issue the SHOW TABLES command on the file system, Hive, and HBase.  
+  
+Complete the following steps to see views that exist in a file system and
+tables that exist in Hive and HBase data sources:
+
+  1. Issue the SHOW SCHEMAS command to see a list of available schemas.
+
+        0: jdbc:drill:zk=drilldemo:5181> show schemas;
+        +-------------+
+        | SCHEMA_NAME |
+        +-------------+
+        | hive.default |
+        | dfs.reviews |
+        | dfs.flatten |
+        | dfs.default |
+        | dfs.root  |
+        | dfs.logs  |
+        | dfs.myviews   |
+        | dfs.clicks  |
+        | dfs.tmp   |
+        | sys       |
+        | hbase     |
+        | INFORMATION_SCHEMA |
+        | s3.twitter  |
+        | s3.reviews  |
+        | s3.default  |
+        +-------------+
+        15 rows selected (0.072 seconds)
+
+  2. Issue the USE command to switch to a particular schema. When you use a particular schema, Drill searches or queries within that schema only. 
+
+        0: jdbc:drill:zk=drilldemo:5181> use dfs.myviews;
+        +------------+------------+
+        |   ok  |  summary   |
+        +------------+------------+
+        | true      | Default schema changed to 'dfs.myviews' |
+        +------------+------------+
+        1 row selected (0.025 seconds)
+
+  3. Issue the SHOW TABLES command to see the views or tables that exist within workspace.
+
+        0: jdbc:drill:zk=drilldemo:5181> show tables;
+        +--------------+------------+
+        | TABLE_SCHEMA | TABLE_NAME |
+        +--------------+------------+
+        | dfs.myviews   | logs_vw   |
+        | dfs.myviews   | customers_vw |
+        | dfs.myviews   | s3_review_vw |
+        | dfs.myviews   | clicks_vw  |
+        | dfs.myviews   | nestedclickview |
+        | dfs.myviews   | s3_user_vw |
+        | dfs.myviews   | s3_bus_vw  |
+        +--------------+------------+
+        7 rows selected (0.499 seconds)
+        0: jdbc:drill:zk=drilldemo:5181>
+
+  4. Switch to the Hive schema and issue the SHOW TABLES command to see the Hive tables that exist.
+
+        0: jdbc:drill:zk=drilldemo:5181> use hive;
+        +------------+------------+
+        |   ok  |  summary   |
+        +------------+------------+
+        | true      | Default schema changed to 'hive' |
+        +------------+------------+
+        1 row selected (0.043 seconds)
+         
+        0: jdbc:drill:zk=drilldemo:5181> show tables;
+        +--------------+------------+
+        | TABLE_SCHEMA | TABLE_NAME |
+        +--------------+------------+
+        | hive.default | orders     |
+        | hive.default | products   |
+        +--------------+------------+
+        2 rows selected (0.552 seconds)
+
+  5. Switch to the HBase schema and issue the SHOW TABLES command to see the HBase tables that exist within the schema.
+
+        0: jdbc:drill:zk=drilldemo:5181> use hbase;
+        +------------+------------+
+        |   ok  |  summary   |
+        +------------+------------+
+        | true      | Default schema changed to 'hbase' |
+        +------------+------------+
+        1 row selected (0.043 seconds)
+         
+         
+        0: jdbc:drill:zk=drilldemo:5181> show tables;
+        +--------------+------------+
+        | TABLE_SCHEMA | TABLE_NAME |
+        +--------------+------------+
+        | hbase     | customers  |
+        +--------------+------------+
+        1 row selected (0.412 seconds)
+
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/120-use-command.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/120-use-command.md b/_docs/sql-reference/sql-commands/120-use-command.md
deleted file mode 100644
index b1bc24a..0000000
--- a/_docs/sql-reference/sql-commands/120-use-command.md
+++ /dev/null
@@ -1,170 +0,0 @@
----
-title: "USE Command"
-parent: "SQL Commands"
----
-The USE command changes the schema context to the specified schema. When you
-issue the USE command to switch to a particular schema, Drill queries that
-schema only.
-
-## Syntax
-
-The USE command supports the following syntax:
-
-    USE schema_name;
-
-## Parameters
-
-_schema_name_  
-A unique name for a Drill schema. A schema in Drill is a configured storage
-plugin, such as hive, or a storage plugin and workspace. For example,
-`dfs.donuts` where `dfs` is an instance of the file system configured as a
-storage plugin, and `donuts` is a workspace configured to point to a directory
-within the file system. You can configure and use multiple storage plugins and
-workspaces in Drill. See [Storage Plugin Registration]({{ site.baseurl }}/docs/storage-plugin-registration) and
-[Workspaces]({{ site.baseurl }}/docs/Workspaces).
-
-## Usage Notes
-
-Issue the USE command to change to a particular schema. When you use a schema,
-you do not have to include the full path to a file or table in your query.  
-  
-For example, to query a file named `donuts.json` in the
-`/users/max/drill/json/` directory, you must include the full file path in
-your query if you do not use a defined workspace
-
-    SELECT * FROM dfs.`/users/max/drill/json/donuts.json` WHERE type='frosted';
-
-If you create a schema that points to the `~/json` directory where the file is
-located and then use the schema, you can issue the query without explicitly
-stating the file path:
-
-    USE dfs.json;  
-    SELECT * FROM `donuts.json`WHERE type='frosted';
-
-If you do not use a schema before querying a table, you must use absolute
-notation, such as `[schema.]table[.column]`, to query the table. If you switch
-to the schema where the table exists, you can just specify the table name in
-the query. For example, to query a table named "`products`" in the `hive`
-schema, tell Drill to use the hive schema and then issue your query with the
-table name only:
-
-    USE hive;  
-    SELECT * FROM products limit 5;   
-  
-Before you issue the USE command, you may want to run SHOW DATABASES or SHOW
-SCHEMAS to see a list of the configured storage plugins and workspaces.
-
-## Example
-
-This example demonstrates how to use a file system and a hive schema to query
-a file and table in Drill.  
-  
-Issue the SHOW DATABASES or SHOW SCHEMAS command to see a list of the
-available schemas that you can use. Both commands return the same results.
-
-    0: jdbc:drill:zk=drilldemo:5181> show schemas;
-    +-------------+
-    | SCHEMA_NAME |
-    +-------------+
-    | hive.default |
-    | dfs.reviews |
-    | dfs.flatten |
-    | dfs.default |
-    | dfs.root    |
-    | dfs.logs    |
-    | dfs.myviews   |
-    | dfs.clicks  |
-    | dfs.tmp     |
-    | sys         |
-    | hbase       |
-    | INFORMATION_SCHEMA |
-    | s3.twitter  |
-    | s3.reviews  |
-    | s3.default  |
-    +-------------+
-    15 rows selected (0.059 seconds)
-
-
-Issue the USE command with the schema that you want Drill to query.  
-**Note:** If you use any of the Drill default schemas, such as `cp.default` or `dfs.default`, do not include .`default`. For example, if you want Drill to issue queries on files in its classpath, you can issue the following command:
-
-    0: jdbc:drill:zk=local> use cp;
-    +------------+------------+
-    |     ok     |  summary   |
-    +------------+------------+
-    | true       | Default schema changed to 'cp' |
-    +------------+------------+
-    1 row selected (0.04 seconds)
-
-Issue the USE command with a file system schema.
-
-    0: jdbc:drill:zk=drilldemo:5181> use dfs.logs;
-    +------------+------------+
-    |     ok     |  summary   |
-    +------------+------------+
-    | true       | Default schema changed to 'dfs.logs' |
-    +------------+------------+
-    1 row selected (0.054 seconds)
-
-You can issue the SHOW FILES command to view the files and directories within
-the schema.
-
-    0: jdbc:drill:zk=drilldemo:5181> show files;
-    +------------+-------------+------------+------------+------------+------------+-------------+------------+------------------+
-    |    name    | isDirectory |   isFile   |   length   |   owner    |   group    | permissions | accessTime | modificationTime |
-    +------------+-------------+------------+------------+------------+------------+-------------+------------+------------------+
-    | csv        | true        | false      | 1          | mapr       | mapr       | rwxrwxr-x   | 2015-02-09 06:49:17.0 | 2015-02-09 06:50:11.172 |
-    | logs       | true        | false      | 3          | mapr       | mapr       | rwxrwxr-x   | 2014-12-16 18:58:26.0 | 2014-12-16 18:58:27.223 |
-    +------------+-------------+------------+------------+------------+------------+-------------+------------+------------------+
-    2 rows selected (0.156 seconds)
-
-Query a file or directory in the file system schema.
-
-    0: jdbc:drill:zk=drilldemo:5181> select * from logs limit 5;
-    +------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+
-    |    dir0    |    dir1    |  trans_id  |    date    |    time    |  cust_id   |   device   |   state    |  camp_id   |  keywords  |  prod_id   | purch_flag |
-    +------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+
-    | 2014       | 8          | 24181      | 08/02/2014 | 09:23:52   | 0          | IOS5       | il         | 2          | wait       | 128        | false      |
-    | 2014       | 8          | 24195      | 08/02/2014 | 07:58:19   | 243        | IOS5       | mo         | 6          | hmm        | 107        | false      |
-    | 2014       | 8          | 24204      | 08/01/2014 | 12:10:27   | 12048      | IOS6       | il         | 1          | marge      | 324        | false      |
-    | 2014       | 8          | 24222      | 08/02/2014 | 16:28:37   | 2488       | IOS6       | pa         | 2          | to         | 391        | false      |
-    | 2014       | 8          | 24227      | 08/02/2014 | 07:14:00   | 154687     | IOS5       | wa         | 2          | on         | 376        | false      |
-    +------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+
-
-Issue the USE command to switch to the hive schema.
-
-    0: jdbc:drill:zk=drilldemo:5181> use hive;
-    +------------+------------+
-    |     ok     |  summary   |
-    +------------+------------+
-    | true       | Default schema changed to 'hive' |
-    +------------+------------+
-    1 row selected (0.093 seconds)
-
-Issue the SHOW TABLES command to see the tables that exist within the schema.
-
-    0: jdbc:drill:zk=drilldemo:5181> show tables;
-    +--------------+------------+
-    | TABLE_SCHEMA | TABLE_NAME |
-    +--------------+------------+
-    | hive.default | orders     |
-    | hive.default | products   |
-    +--------------+------------+
-    2 rows selected (0.421 seconds)
-
-Query a table within the schema.
-
-    0: jdbc:drill:zk=drilldemo:5181> select * from products limit 5;
-    +------------+------------+------------+------------+
-    |  prod_id   |    name    |  category  |   price    |
-    +------------+------------+------------+------------+
-    | 0          | Sony notebook | laptop     | 959        |
-    | 1          | #10-4 1/8 x 9 1/2 Premium Diagonal Seam Envelopes | Envelopes  | 16         |
-    | 2          | #10- 4 1/8 x 9 1/2 Recycled Envelopes | Envelopes  | 9          |
-    | 3          | #10- 4 1/8 x 9 1/2 Security-Tint Envelopes | Envelopes  | 8          |
-    | 4          | #10 Self-Seal White Envelopes | Envelopes  | 11         |
-    +------------+------------+------------+------------+
-    5 rows selected (0.211 seconds)
-
-  
-

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/120-use.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/120-use.md b/_docs/sql-reference/sql-commands/120-use.md
new file mode 100644
index 0000000..aaaf74f
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/120-use.md
@@ -0,0 +1,170 @@
+---
+title: "USE"
+parent: "SQL Commands"
+---
+The USE command changes the schema context to the specified schema. When you
+issue the USE command to switch to a particular schema, Drill queries that
+schema only.
+
+## Syntax
+
+The USE command supports the following syntax:
+
+    USE schema_name;
+
+## Parameters
+
+_schema_name_  
+A unique name for a Drill schema. A schema in Drill is a configured storage
+plugin, such as hive, or a storage plugin and workspace. For example,
+`dfs.donuts` where `dfs` is an instance of the file system configured as a
+storage plugin, and `donuts` is a workspace configured to point to a directory
+within the file system. You can configure and use multiple storage plugins and
+workspaces in Drill. See [Storage Plugin Registration]({{ site.baseurl }}/docs/storage-plugin-registration) and
+[Workspaces]({{ site.baseurl }}/docs/Workspaces).
+
+## Usage Notes
+
+Issue the USE command to change to a particular schema. When you use a schema,
+you do not have to include the full path to a file or table in your query.  
+  
+For example, to query a file named `donuts.json` in the
+`/users/max/drill/json/` directory, you must include the full file path in
+your query if you do not use a defined workspace:
+
+    SELECT * FROM dfs.`/users/max/drill/json/donuts.json` WHERE type='frosted';
+
+If you create a schema that points to the `~/json` directory where the file is
+located and then use the schema, you can issue the query without explicitly
+stating the file path:
+
+    USE dfs.json;  
+    SELECT * FROM `donuts.json` WHERE type='frosted';
+
+If you do not use a schema before querying a table, you must use absolute
+notation, such as `[schema.]table[.column]`, to query the table. If you switch
+to the schema where the table exists, you can just specify the table name in
+the query. For example, to query a table named "`products`" in the `hive`
+schema, tell Drill to use the hive schema and then issue your query with the
+table name only:
+
+    USE hive;  
+    SELECT * FROM products limit 5;   
+  
+Before you issue the USE command, you may want to run SHOW DATABASES or SHOW
+SCHEMAS to see a list of the configured storage plugins and workspaces.
+
+## Example
+
+This example demonstrates how to use a file system and a hive schema to query
+a file and table in Drill.  
+  
+Issue the SHOW DATABASES or SHOW SCHEMAS command to see a list of the
+available schemas that you can use. Both commands return the same results.
+
+    0: jdbc:drill:zk=drilldemo:5181> show schemas;
+    +-------------+
+    | SCHEMA_NAME |
+    +-------------+
+    | hive.default |
+    | dfs.reviews |
+    | dfs.flatten |
+    | dfs.default |
+    | dfs.root    |
+    | dfs.logs    |
+    | dfs.myviews   |
+    | dfs.clicks  |
+    | dfs.tmp     |
+    | sys         |
+    | hbase       |
+    | INFORMATION_SCHEMA |
+    | s3.twitter  |
+    | s3.reviews  |
+    | s3.default  |
+    +-------------+
+    15 rows selected (0.059 seconds)
+
+
+Issue the USE command with the schema that you want Drill to query.  
+**Note:** If you use any of the Drill default schemas, such as `cp.default` or `dfs.default`, do not include .`default`. For example, if you want Drill to issue queries on files in its classpath, you can issue the following command:
+
+    0: jdbc:drill:zk=local> use cp;
+    +------------+------------+
+    |     ok     |  summary   |
+    +------------+------------+
+    | true       | Default schema changed to 'cp' |
+    +------------+------------+
+    1 row selected (0.04 seconds)
+
+Issue the USE command with a file system schema.
+
+    0: jdbc:drill:zk=drilldemo:5181> use dfs.logs;
+    +------------+------------+
+    |     ok     |  summary   |
+    +------------+------------+
+    | true       | Default schema changed to 'dfs.logs' |
+    +------------+------------+
+    1 row selected (0.054 seconds)
+
+You can issue the SHOW FILES command to view the files and directories within
+the schema.
+
+    0: jdbc:drill:zk=drilldemo:5181> show files;
+    +------------+-------------+------------+------------+------------+------------+-------------+------------+------------------+
+    |    name    | isDirectory |   isFile   |   length   |   owner    |   group    | permissions | accessTime | modificationTime |
+    +------------+-------------+------------+------------+------------+------------+-------------+------------+------------------+
+    | csv        | true        | false      | 1          | mapr       | mapr       | rwxrwxr-x   | 2015-02-09 06:49:17.0 | 2015-02-09 06:50:11.172 |
+    | logs       | true        | false      | 3          | mapr       | mapr       | rwxrwxr-x   | 2014-12-16 18:58:26.0 | 2014-12-16 18:58:27.223 |
+    +------------+-------------+------------+------------+------------+------------+-------------+------------+------------------+
+    2 rows selected (0.156 seconds)
+
+Query a file or directory in the file system schema.
+
+    0: jdbc:drill:zk=drilldemo:5181> select * from logs limit 5;
+    +------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+
+    |    dir0    |    dir1    |  trans_id  |    date    |    time    |  cust_id   |   device   |   state    |  camp_id   |  keywords  |  prod_id   | purch_flag |
+    +------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+
+    | 2014       | 8          | 24181      | 08/02/2014 | 09:23:52   | 0          | IOS5       | il         | 2          | wait       | 128        | false      |
+    | 2014       | 8          | 24195      | 08/02/2014 | 07:58:19   | 243        | IOS5       | mo         | 6          | hmm        | 107        | false      |
+    | 2014       | 8          | 24204      | 08/01/2014 | 12:10:27   | 12048      | IOS6       | il         | 1          | marge      | 324        | false      |
+    | 2014       | 8          | 24222      | 08/02/2014 | 16:28:37   | 2488       | IOS6       | pa         | 2          | to         | 391        | false      |
+    | 2014       | 8          | 24227      | 08/02/2014 | 07:14:00   | 154687     | IOS5       | wa         | 2          | on         | 376        | false      |
+    +------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+------------+
+
+Issue the USE command to switch to the hive schema.
+
+    0: jdbc:drill:zk=drilldemo:5181> use hive;
+    +------------+------------+
+    |     ok     |  summary   |
+    +------------+------------+
+    | true       | Default schema changed to 'hive' |
+    +------------+------------+
+    1 row selected (0.093 seconds)
+
+Issue the SHOW TABLES command to see the tables that exist within the schema.
+
+    0: jdbc:drill:zk=drilldemo:5181> show tables;
+    +--------------+------------+
+    | TABLE_SCHEMA | TABLE_NAME |
+    +--------------+------------+
+    | hive.default | orders     |
+    | hive.default | products   |
+    +--------------+------------+
+    2 rows selected (0.421 seconds)
+
+Query a table within the schema.
+
+    0: jdbc:drill:zk=drilldemo:5181> select * from products limit 5;
+    +------------+------------+------------+------------+
+    |  prod_id   |    name    |  category  |   price    |
+    +------------+------------+------------+------------+
+    | 0          | Sony notebook | laptop     | 959        |
+    | 1          | #10-4 1/8 x 9 1/2 Premium Diagonal Seam Envelopes | Envelopes  | 16         |
+    | 2          | #10- 4 1/8 x 9 1/2 Recycled Envelopes | Envelopes  | 9          |
+    | 3          | #10- 4 1/8 x 9 1/2 Security-Tint Envelopes | Envelopes  | 8          |
+    | 4          | #10 Self-Seal White Envelopes | Envelopes  | 11         |
+    +------------+------------+------------+------------+
+    5 rows selected (0.211 seconds)
+
+  
+


[21/31] drill git commit: add screenshot, clarification

Posted by ts...@apache.org.
add screenshot, clarification


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/4f4d4fb8
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/4f4d4fb8
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/4f4d4fb8

Branch: refs/heads/gh-pages
Commit: 4f4d4fb8dbdcdd188cc651713427a04c5588fc7e
Parents: 46617fc
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 08:23:01 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 08:23:01 2015 -0700

----------------------------------------------------------------------
 _docs/img/sqlline1.png                          | Bin 6633 -> 10413 bytes
 .../050-starting-drill-on-windows.md            |  15 +++++++++------
 2 files changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/4f4d4fb8/_docs/img/sqlline1.png
----------------------------------------------------------------------
diff --git a/_docs/img/sqlline1.png b/_docs/img/sqlline1.png
index 5ea6b30..5045644 100755
Binary files a/_docs/img/sqlline1.png and b/_docs/img/sqlline1.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/4f4d4fb8/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
index 2a4a9bd..942d727 100644
--- a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
@@ -4,17 +4,20 @@ parent: "Installing Drill in Embedded Mode"
 ---
 Start the Drill shell using the **sqlline command**. The `zk=local` means the local node is the ZooKeeper node. Complete the following steps to launch the Drill shell:
 
-1. Open the apache-drill-0.1.0 folder.  
-2. Go to the bin directory.
-2. Open Command Prompt and type the following command on the command line:
+1. Open Command Prompt.
+2. Open the apache-drill-1.0.0 folder. For example:  
+   ``cd apache-drill-1.0.0``
+3. Go to the bin directory. For example:  
+   ``cd bin``
+4. Type the following command on the command line:
    ``sqlline.bat -u "jdbc:drill:zk=local"``
-3. Enter the username, `admin`, and password, also `admin` when prompted.
-   The `0: jdbc:drill:zk=local>` prompt appears.
+   ![drill install dir]({{ site.baseurl }}/docs/img/sqlline1.png)
+
 At this point, you can [submit queries]({{ site.baseurl }}/docs/drill-in-10-minutes#query-sample-data) to Drill.
 
 You can use the schema option in the **sqlline** command to specify a storage plugin. Specifying the storage plugin when you start up eliminates the need to specify the storage plugin in the query: For example, this command specifies the `dfs` storage plugin.
 
-    c:\bin\sqlline sqlline.bat –u "jdbc:drill:schema=dfs;zk=local"
+    C:\bin\sqlline sqlline.bat –u "jdbc:drill:schema=dfs;zk=local"
 
 ## Exiting the Drill Shell
 


[17/31] drill git commit: correct step 4

Posted by ts...@apache.org.
correct step 4


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/89213671
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/89213671
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/89213671

Branch: refs/heads/gh-pages
Commit: 8921367137597a127134a899b2ef386ed567f361
Parents: 12b47f1
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 07:32:49 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 07:32:49 2015 -0700

----------------------------------------------------------------------
 .../050-starting-drill-on-windows.md                               | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/89213671/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
index 130faa1..027b052 100644
--- a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
@@ -8,7 +8,7 @@ Start the Drill shell using the **sqlline command**. The `zk=local` means the lo
 2. Go to the bin directory.
 2. Open Command Prompt and type the following command on the command line:
    ``sqlline.bat -u "jdbc:drill:zk=local"``
-3. At the sqlline> prompt, type `"!connect jdbc:drill:zk=local"` and then press Enter:
+3. At the sqlline> prompt, type `!connect jdbc:drill:zk=local` and then press Enter:
    ![sqlline]({{ site.baseurl }}/docs/img/sqlline1.png)
 4. Enter the username, `admin`, and password, also `admin` when prompted.
    The `0: jdbc:drill:zk=local>` prompt appears.


[30/31] drill git commit: Press release blog post

Posted by ts...@apache.org.
Press release blog post


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7d663345
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7d663345
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7d663345

Branch: refs/heads/gh-pages
Commit: 7d66334514038b394fef71ecb15f380796d43bb4
Parents: 5a7f700
Author: Tomer Shiran <ts...@gmail.com>
Authored: Mon May 18 16:34:16 2015 -0700
Committer: Tomer Shiran <ts...@gmail.com>
Committed: Mon May 18 16:34:16 2015 -0700

----------------------------------------------------------------------
 blog/_drafts/drill-1.0-released.md | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/7d663345/blog/_drafts/drill-1.0-released.md
----------------------------------------------------------------------
diff --git a/blog/_drafts/drill-1.0-released.md b/blog/_drafts/drill-1.0-released.md
index e0631f7..2c129eb 100644
--- a/blog/_drafts/drill-1.0-released.md
+++ b/blog/_drafts/drill-1.0-released.md
@@ -21,29 +21,37 @@ Tomer Shiran and Jacques Nadeau
 
 <hr />
 
-# The Apache Software Foundation Announces Apache™ Drill™ 1.0
+The Apache Software Foundation Announces Apache™ Drill™ 1.0
 
-## Open Source schema-free SQL query engine revolutionizes data exploration and analytics for Apache Hadoop®, NoSQL and Cloud storage
+Thousands of users adopt Open Source, enterprise-grade, schema-free SQL query engine for Apache Hadoop®, NoSQL and Cloud storage.
 
-Forest Hill, MD - 19 May 2015 - The Apache Software Foundation (ASF), the all-volunteer developers, stewards, and incubators of more than 350 Open Source projects and initiatives, announced today the availability of Apache™ Drill™ 1.0, the schema-free SQL query engine for Apache Hadoop®, NoSQL and Cloud storage.
+Forest Hill, MD --19 May 2015-- The Apache Software Foundation (ASF), the all-volunteer developers, stewards, and incubators of more than 350 Open Source projects and initiatives, announced today the availability of Apache™ Drill™ 1.0, the schema-free SQL query engine for Apache Hadoop®, NoSQL and Cloud storage.
 
 "The production-ready 1.0 release represents a significant milestone for the Drill project," said Tomer Shiran, member of the Apache Drill Project Management Committee. "It is the outcome of almost three years of development involving dozens of engineers from numerous companies. Apache Drill's flexibility and ease-of-use have attracted thousands of users, and the enterprise-grade reliability, security and performance in the 1.0 release will further accelerate adoption."
 
-With the exponential growth of data in recent years, and the shift towards rapid application development, new data is increasingly being stored in non-relational, schema-free datastores including Hadoop, NoSQL and Cloud storage. Apache Drill enables analysts, business users, data scientists and developers to explore and analyze this data without sacrificing the flexibility and agility offered by these datastores. Drill processes the data in-situ without requiring users to define schemas or transform data.
+With the exponential growth of data in recent years, and the shift towards rapid application development, new data is increasingly being stored in non-relational, schema-free datastores including Hadoop, NoSQL and Cloud storage. Apache Drill revolutionizes data exploration and analytics by enabling analysts, business users, data scientists and developers to explore and analyze this data without sacrificing the flexibility and agility offered by these datastores. Drill processes the data in-situ without requiring users to define schemas or transform data.
 
-"Drill introduces the JSON document model to the world of SQL-based analytics and BI" said Jacques Nadeau, Vice President of Apache Drill. "This enables users to query fixed-schema, evolving-schema and schema-free data stored in a variety of formats and datastores. The architecture of relational query engines and databases is built on the assumption that all data has a simple and static structure that’s known in advance, and this 40-year-old assumption is simply no longer valid. We designed Drill from the ground up to address the new reality."
+"Drill introduces the JSON document model to the world of SQL-based analytics and BI" said Jacques Nadeau, Vice President of Apache Drill. "This enables users to query fixed-schema, evolving-schema and schema-free data stored in a variety of formats and datastores. The architecture of relational query engines and databases is built on the assumption that all data has a simple and static structure that’s known in advance, and this 40-year-old assumption is simply no longer valid. We designed Drill from the ground up to address the new reality."
 
 Apache Drill's architecture is unique in many ways. It is the only columnar execution engine that supports complex and schema-free data, and the only execution engine that performs data-driven query compilation (and re-compilation, also known as schema discovery) during query execution. These unique capabilities enable Drill to achieve record-breaking performance with the flexibility offered by the JSON document model.
 
+The business intelligence (BI) partner ecosystem is embracing the power of Apache Drill. Organizations such as Information Builders, JReport (Jinfonet Software), MicroStrategy, Qlik®, Simba, Tableau, and TIBCO, are working closely with the Drill community to interoperate BI tools with Drill through standard ODBC and JDBC connectivity. This collaboration enables end users to explore data by leveraging sophisticated visualization tools and advanced analytics.
+
+"We've been using Apache Drill for the past six months," said Andrew Hamilton, CTO of Cardlytics. "Its ease of deployment and use along with its ability to quickly process trillions of records has made it an invaluable tool inside Cardlytics. Queries that were previously insurmountable are now common occurrence. Congratulations to the Drill community on this momentous occasion."
+
 "Drill's columnar execution engine and optimizer take full advantage of Apache Parquet's columnar storage to achieve maximum performance," said Julien Le Dem, Technical Lead of Data Processing at Twitter and Vice President of Apache Parquet. "The Drill team has been a key contributor to the Parquet project, including recent enhancements to Parquet types and vectorization. The Drill team’s involvement in the Parquet community is instrumental in driving the standard."
 
-"Apache Drill 1.0 raises the bar for secure, reliable and scalable SQL-on-Hadoop," said Piyush Bhargava, distinguished engineer, IT, Cisco Systems.  "Because Drill integrates with existing data virtualization and visualization tools, we expect it will improve adoption of self-service data exploration and large-scale BI queries on our advanced Hadoop platform at Cisco."  
+"Apache Drill 1.0 raises the bar for secure, reliable and scalable SQL-on-Hadoop," said Piyush Bhargava, distinguished engineer, IT, Cisco Systems. "Because Drill integrates with existing data virtualization and visualization tools, we expect it will improve adoption of self-service data exploration and large-scale BI queries on our advanced Hadoop platform at Cisco."
+
+"MicroStrategy recognized early on the value of Apache Drill and is one of the first analytic platforms to certify Drill," said Tim Lang, senior executive vice president and chief technology officer at MicroStrategy Incorporated.  "Because Drill is designed to be used with a minimal learning curve, it opens up more complex data sets to the end user who can immediately visualize and analyze new information using MicroStrategy’s advanced capabilities."
 
-"Apache Drill closes a gap around self-service SQL queries in Hadoop, especially on complex, dynamic NoSQL data types," said Mike Foster, strategic alliances technology officer, Qlik.  "Drill's performance advantages for Hadoop data access, combined with the Qlik associative experience, enables our customers to continue discovering business value from a wide range of data. Congrats to the Apache Drill community."
+"Apache Drill closes a gap around self-service SQL queries in Hadoop, especially on complex, dynamic NoSQL data types," said Mike Foster, Strategic Alliances Technology Officer at Qlik.  "Drill's performance advantages for Hadoop data access, combined with the Qlik associative experience, enables our customers to continue discovering business value from a wide range of data. Congratulations to the Apache Drill community."
 
 "Apache Drill empowers people to access data that is traditionally difficult to work with," said Jeff Feng, product manager, Tableau.  "Direct access within a centralized data repository and without pre-generating metadata definitions encourages data democracy which is essential for data-driven organizations. Additionally, Drill's instant and secure access to complex data formats, such as JSON, opens up extended analytical opportunities."
 
-"Congratulations to the Apache Drill community on the availability of 1.0," said Karl Van den Bergh, vice president, products and cloud, TIBCO. "Drill promises to bring low-latency access to data stored in Hadoop and HBase via standard SQL semantics. This innovation is in line with the value of Fast Data analysis, which TIBCO customers welcome and appreciate."
+"Congratulations to the Apache Drill community on the availability of 1.0," said Karl Van den Bergh, Vice President, Products and Cloud at TIBCO. "Drill promises to bring low-latency access to data stored in Hadoop and HBase via standard SQL semantics. This innovation is in line with the value of Fast Data analysis, which TIBCO customers welcome and appreciate."
+
+"The community's accomplishment is a testament to The Apache Software Foundation's ability to bring together diverse companies to work towards a common goal. None of this would have been possible without the contribution of engineers with advanced degrees and experience in relational databases, data warehousing, MPP, query optimization, Hadoop and NoSQL," added Nadeau. "Our community's strength is what will solidify Apache Drill as a key data technology for the next decade. We welcome interested individuals to learn more about Drill by joining the community's mailing lists, attending upcoming talks by Drill code committers at various conferences including Hadoop Summit, NoSQL Now, Hadoop World, or at a local Apache Drill MeetUp."
 
 Availability and Oversight
 Apache Drill 1.0 is available immediately as a free download from http://drill.apache.org/download/. Documentation is available at http://drill.apache.org/docs/. As with all Apache products, Apache Drill software is released under the Apache License v2.0, and is overseen by a self-selected team of active contributors to the project. A Project Management Committee (PMC) guides the project's day-to-day operations, including community development and product releases. For ways to become involved with Apache Drill, visit http://drill.apache.org/ and @ApacheDrill on Twitter.
@@ -52,3 +60,5 @@ About The Apache Software Foundation (ASF)
 Established in 1999, the all-volunteer Foundation oversees more than 350 leading Open Source projects, including Apache HTTP Server --the world's most popular Web server software. Through the ASF's meritocratic process known as "The Apache Way," more than 500 individual Members and 4,500 Committers successfully collaborate to develop freely available enterprise-grade software, benefiting millions of users worldwide: thousands of software solutions are distributed under the Apache License; and the community actively participates in ASF mailing lists, mentoring initiatives, and ApacheCon, the Foundation's official user conference, trainings, and expo. The ASF is a US 501(c)(3) charitable organization, funded by individual donations and corporate sponsors including Bloomberg, Budget Direct, Cerner, Citrix, Cloudera, Comcast, Facebook, Google, Hortonworks, HP, IBM, InMotion Hosting, iSigma, Matt Mullenweg, Microsoft, Pivotal, Produban, WANdisco, and Yahoo. For more information, visit http://www.apache.org/ or follow @TheASF on Twitter.
 
 © The Apache Software Foundation. "Apache", "Apache Drill", "Drill", "Apache Hadoop", "Hadoop", "Apache Parquet", "Parquet", and "ApacheCon", are registered trademarks or trademarks of The Apache Software Foundation. All other brands and trademarks are the property of their respective owners.
+
+# # #


[05/31] drill git commit: avro support

Posted by ts...@apache.org.
avro support


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/313dbd6d
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/313dbd6d
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/313dbd6d

Branch: refs/heads/gh-pages
Commit: 313dbd6d82a9b5f90544afae94d0a95bc0d5fa07
Parents: c240e01
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 12:26:42 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 12:26:42 2015 -0700

----------------------------------------------------------------------
 .../035-plugin-configuration-introduction.md                 | 4 +++-
 .../connect-a-data-source/080-drill-default-input-format.md  | 8 +++++---
 .../010-data-sources-and-file-formats-introduction.md        | 5 ++++-
 _docs/getting-started/010-drill-introduction.md              | 1 +
 .../005-querying-a-file-system-introduction.md               | 1 +
 5 files changed, 14 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/313dbd6d/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
index 332966c..0cfc206 100644
--- a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
+++ b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
@@ -78,7 +78,7 @@ The following table describes the attributes you configure for storage plugins i
   </tr>
   <tr>
     <td>"formats"</td>
-    <td>"psv"<br>"csv"<br>"tsv"<br>"parquet"<br>"json"<br>"maprdb"</td>
+    <td>"psv"<br>"csv"<br>"tsv"<br>"parquet"<br>"json"<br>"avro"<br>"maprdb"*</td>
     <td>yes</td>
     <td>One or more file formats of data Drill can read. Drill can implicitly detect some file formats based on the file extension or the first few bits of data within the file, but you need to configure an option for others.</td>
   </tr>
@@ -102,6 +102,8 @@ The following table describes the attributes you configure for storage plugins i
   </tr>
 </table>
 
+\* Only appears when you install Drill on a cluster using the mapr-drill package.
+
 The configuration of other attributes, such as `size.calculator.enabled` in the hbase plugin and `configProps` in the hive plugin, are implementation-dependent and beyond the scope of this document.
 
 Although Drill can work with different file types in the same directory, restricting a Drill workspace to one file type prevents confusion.

http://git-wip-us.apache.org/repos/asf/drill/blob/313dbd6d/_docs/connect-a-data-source/080-drill-default-input-format.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/080-drill-default-input-format.md b/_docs/connect-a-data-source/080-drill-default-input-format.md
index 25a065b..7c41e91 100644
--- a/_docs/connect-a-data-source/080-drill-default-input-format.md
+++ b/_docs/connect-a-data-source/080-drill-default-input-format.md
@@ -21,11 +21,13 @@ default input format, and Drill cannot detect the file format, the query
 fails. You can define a default input format for any of the file types that
 Drill supports. Currently, Drill supports the following types:
 
-  * CSV
-  * TSV
-  * PSV
+  * Avro
+  * CSV, TSV, or PSV
   * Parquet
   * JSON
+  * MapR-DB*
+
+\* Only available when you install Drill on a cluster using the mapr-drill package.
 
 ## Defining a Default Input Format
 

http://git-wip-us.apache.org/repos/asf/drill/blob/313dbd6d/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md b/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
index 8ec8cee..d758a50 100644
--- a/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
+++ b/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
@@ -11,11 +11,14 @@ Included in the data sources that  Drill supports are these key data sources:
 
 Drill supports the following input formats for data:
 
+* [Avro](http://avro.apache.org/docs/current/spec.html)
 * CSV (Comma-Separated-Values)
 * TSV (Tab-Separated-Values)
 * PSV (Pipe-Separated-Values)
 * Parquet
-* JSON
+* MapR-DB*
+
+\* Only available when you install Drill on a cluster using the mapr-drill package.
 
 You set the input format for data coming from data sources to Drill in the workspace portion of the [storage plugin]({{ site.baseurl }}/docs/storage-plugin-registration) definition. The default input format in Drill is Parquet. 
 

http://git-wip-us.apache.org/repos/asf/drill/blob/313dbd6d/_docs/getting-started/010-drill-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/getting-started/010-drill-introduction.md b/_docs/getting-started/010-drill-introduction.md
index 0586738..100d9da 100644
--- a/_docs/getting-started/010-drill-introduction.md
+++ b/_docs/getting-started/010-drill-introduction.md
@@ -16,6 +16,7 @@ Apache Drill 1.0 offers the following new features:
 * [Query audit logging]({{site.baseurl}}/docs/getting-query-information/) for getting the query history on a Drillbit.
 * Improved connection handling.
 * New Errors tab in the Query Profiles UI that facilitates troubleshooting and distributed storing of profiles.
+* Support for new storage plugin format: [Avro](http://avro.apache.org/docs/current/spec.html)
 
 Key features of Apache Drill are:
 

http://git-wip-us.apache.org/repos/asf/drill/blob/313dbd6d/_docs/query-data/query-a-file-system/005-querying-a-file-system-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/query-a-file-system/005-querying-a-file-system-introduction.md b/_docs/query-data/query-a-file-system/005-querying-a-file-system-introduction.md
index 1238eba..6d204ca 100644
--- a/_docs/query-data/query-a-file-system/005-querying-a-file-system-introduction.md
+++ b/_docs/query-data/query-a-file-system/005-querying-a-file-system-introduction.md
@@ -26,6 +26,7 @@ Drill supports the following file types:
     * Tab-separated values (TSV, type: text)
     * Pipe-separated values (PSV, type: text)
   * Structured data files:
+    * Avro (type: avro)
     * JSON (type: json)
     * Parquet (type: parquet)
 


[03/31] drill git commit: add soft release notes

Posted by ts...@apache.org.
add soft release notes


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/2c75f6d5
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/2c75f6d5
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/2c75f6d5

Branch: refs/heads/gh-pages
Commit: 2c75f6d509395e579c68923c116e7989dd0df36a
Parents: 64910ec
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 11:24:25 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 11:24:25 2015 -0700

----------------------------------------------------------------------
 _docs/getting-started/010-drill-introduction.md | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/2c75f6d5/_docs/getting-started/010-drill-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/getting-started/010-drill-introduction.md b/_docs/getting-started/010-drill-introduction.md
index 2c3c1c4..0586738 100644
--- a/_docs/getting-started/010-drill-introduction.md
+++ b/_docs/getting-started/010-drill-introduction.md
@@ -7,8 +7,17 @@ Drill is designed from the ground up to support high-performance analysis on
 the semi-structured and rapidly evolving data coming from modern Big Data
 applications, while still providing the familiarity and ecosystem of ANSI SQL,
 the industry-standard query language. Drill provides plug-and-play integration
-with existing Apache Hive and Apache HBase deployments. Apache Drill offers
-the following key features:
+with existing Apache Hive and Apache HBase deployments. 
+
+Apache Drill 1.0 offers the following new features:
+
+* Many performance planning and execution improvements, including a new text reader for faster join planning that complies with RFC 4180.
+* Updated [Drill shell]({{site.baseurl}}/docs/configuring-the-drill-shell/#examples-of-configuring-the-drill-shell) and now formats query results having fewer than 70 characters in a column.
+* [Query audit logging]({{site.baseurl}}/docs/getting-query-information/) for getting the query history on a Drillbit.
+* Improved connection handling.
+* New Errors tab in the Query Profiles UI that facilitates troubleshooting and distributed storing of profiles.
+
+Key features of Apache Drill are:
 
   * Low-latency SQL queries
   * Dynamic queries on self-describing data in files (such as JSON, Parquet, text) and MapR-DB/HBase tables, without requiring metadata definitions in the Hive metastore.


[23/31] drill git commit: add BB's clause pages, renamed sql command pages

Posted by ts...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/configure-drill/070-configuring-user-impersonation.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/070-configuring-user-impersonation.md b/_docs/configure-drill/070-configuring-user-impersonation.md
index 6203ca1..dbcdaf0 100644
--- a/_docs/configure-drill/070-configuring-user-impersonation.md
+++ b/_docs/configure-drill/070-configuring-user-impersonation.md
@@ -43,7 +43,7 @@ The following table lists the clients, storage plugins, and types of queries tha
 </table>
 
 ## Impersonation and Views
-You can use views with impersonation to provide granular access to data and protect sensitive information. When you create a view, Drill stores the view definition in a file and suffixes the file with .drill.view. For example, if you create a view named myview, Drill creates a view file named myview.drill.view and saves it in the current workspace or the workspace specified, such as dfs.views.myview. See [CREATE VIEW]({{site.baseurl}}/docs/create-view-command/) Command.
+You can use views with impersonation to provide granular access to data and protect sensitive information. When you create a view, Drill stores the view definition in a file and suffixes the file with .drill.view. For example, if you create a view named myview, Drill creates a view file named myview.drill.view and saves it in the current workspace or the workspace specified, such as dfs.views.myview. See [CREATE VIEW]({{site.baseurl}}/docs/create-view) Command.
 
 You can create a view and grant read permissions on the view to give other users access to the data that the view references. When a user queries the view, Drill impersonates the view owner to access the underlying data. A user with read access to a view can create new views from the originating view to further restrict access on data.
 

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/data-sources-and-file-formats/050-json-data-model.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/050-json-data-model.md b/_docs/data-sources-and-file-formats/050-json-data-model.md
index 548b709..a793dbf 100644
--- a/_docs/data-sources-and-file-formats/050-json-data-model.md
+++ b/_docs/data-sources-and-file-formats/050-json-data-model.md
@@ -95,7 +95,7 @@ You can write data from Drill to a JSON file. The following setup is required:
         CREATE TABLE my_json AS
         SELECT my column from dfs.`<path_file_name>`;
 
-Drill performs the following actions, as shown in the complete [CTAS command example]({{ site.baseurl }}/docs/create-table-as-ctas-command):
+Drill performs the following actions, as shown in the complete [CTAS command example]({{ site.baseurl }}/docs/create-table-as-ctas/):
    
 * Creates a directory using table name.
 * Writes the JSON data to the directory in the workspace location.

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/040-operators.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/040-operators.md b/_docs/sql-reference/040-operators.md
index 2de460e..4eb83f9 100644
--- a/_docs/sql-reference/040-operators.md
+++ b/_docs/sql-reference/040-operators.md
@@ -60,7 +60,7 @@ You can use the following subquery operators in your Drill queries:
   * EXISTS
   * IN
 
-See [SELECT Statements]({{ site.baseurl }}/docs/select-statements).
+See [SELECT Statements]({{ site.baseurl }}/docs/select).
 
 ## String Concatenate Operator
 

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/005-supported-sql-commands.md b/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
index 233019f..37a08eb 100644
--- a/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
+++ b/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
@@ -6,4 +6,4 @@ The following table provides a list of the SQL commands that Drill supports,
 with their descriptions and example syntax:
 
 <table style='table-layout:fixed;width:100%'>
-    <tr><th >Command</th><th >Description</th><th >Syntax</th></tr><tr><td valign="top" width="15%"><a href="/docs/alter-session-command">ALTER SESSION</a></td><td valign="top" width="60%">Changes a system setting for the duration of a session. A session ends when you quit the Drill shell. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top"><pre>ALTER SESSION SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/alter-system-command">ALTER SYSTEM</a></td><td valign="top" >Permanently changes a system setting. The new settings persist across all sessions. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top" ><pre>ALTER SYSTEM SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><p><a href="/docs/crea
 te-table-as--ctas-command">CREATE TABLE AS<br />(CTAS)</a></p></td><td valign="top" >Creates a new table and populates the new table with rows returned from a SELECT query. Use the CREATE TABLE AS (CTAS) statement in place of INSERT INTO. When you issue the CTAS command, you create a directory that contains parquet or CSV files. Each workspace in a file system has a default file type.<br />You can specify which writer you want Drill to use when creating a table: parquet, CSV, or JSON (as specified with the <code>store.format</code> option).</td><td valign="top" ><pre class="programlisting">CREATE TABLE new_table_name AS &lt;query&gt;;</pre></td></tr><tr><td - valign="top" ><a href="/docs/create-view-command">CREATE VIEW </a></td><td - valign="top" >Creates a virtual structure for the result set of a stored query.-</td><td -valign="top" ><pre>CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS &lt;query&gt;;</pre></td></tr><tr><td  valign="top" ><a href="/docs
 /describe-command">DESCRIBE</a></td><td  valign="top" >Returns information about columns in a table or view.</td><td valign="top" ><pre>DESCRIBE [workspace.]table_name|view_name</pre></td></tr><tr><td valign="top" ><a href="/docs/drop-view-command">DROP VIEW</a></td><td valign="top" >Removes a view.</td><td valign="top" ><pre>DROP VIEW [workspace.]view_name ;</pre></td></tr><tr><td  valign="top" ><a href="/docs/explain-commands">EXPLAIN PLAN FOR</a></td><td valign="top" >Returns the physical plan for a particular query.</td><td valign="top" ><pre>EXPLAIN PLAN FOR &lt;query&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/explain-commands">EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR</a></td><td valign="top" >Returns the logical plan for a particular query.</td><td  valign="top" ><pre>EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR &lt;query&gt;;</pre></td></tr><tr><td colspan="1" valign="top" ><a href="/docs/select-statements" rel="nofollow">SELECT</a></td><td valign="top" >Retrieves dat
 a from tables and files.</td><td  valign="top" ><pre>[WITH subquery]<br />SELECT column_list FROM table_name <br />[WHERE clause]<br />[GROUP BY clause]<br />[HAVING clause]<br />[ORDER BY clause];</pre></td></tr><tr><td  valign="top" ><a href="/docs/show-databases-and-show-schemas-commands">SHOW DATABASES </a></td><td valign="top" >Returns a list of available schemas. Equivalent to SHOW SCHEMAS.</td><td valign="top" ><pre>SHOW DATABASES;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-files-command" >SHOW FILES</a></td><td valign="top" >Returns a list of files in a file system schema.</td><td valign="top" ><pre>SHOW FILES IN filesystem.`schema_name`;<br />SHOW FILES FROM filesystem.`schema_name`;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-databases-and-show-schemas-commands">SHOW SCHEMAS</a></td><td - valign="top" >Returns a list of available schemas. Equivalent to SHOW DATABASES.</td><td valign="top" ><pre>SHOW SCHEMAS;</pre></td></tr><tr><td valign="top" ><
 a href="/docs/show-tables-command">SHOW TABLES</a></td><td valign="top" >Returns a list of tables and views.</td><td valign="top" ><pre>SHOW TABLES;</pre></td></tr><tr><td valign="top" ><a href="/docs/use-command">USE</a></td><td valign="top" >Change to a particular schema. When you opt to use a particular schema, Drill issues queries on that schema only.</td><td valign="top" ><pre>USE schema_name;</pre></td></tr></table>
+    <tr><th >Command</th><th >Description</th><th >Syntax</th></tr><tr><td valign="top" width="15%"><a href="/docs/alter-session">ALTER SESSION</a></td><td valign="top" width="60%">Changes a system setting for the duration of a session. A session ends when you quit the Drill shell. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top"><pre>ALTER SESSION SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/alter-system">ALTER SYSTEM</a></td><td valign="top" >Permanently changes a system setting. The new settings persist across all sessions. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top" ><pre>ALTER SYSTEM SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><p><a href="/docs/create-table-as--cta
 s">CREATE TABLE AS<br />(CTAS)</a></p></td><td valign="top" >Creates a new table and populates the new table with rows returned from a SELECT query. Use the CREATE TABLE AS (CTAS) statement in place of INSERT INTO. When you issue the CTAS command, you create a directory that contains parquet or CSV files. Each workspace in a file system has a default file type.<br />You can specify which writer you want Drill to use when creating a table: parquet, CSV, or JSON (as specified with the <code>store.format</code> option).</td><td valign="top" ><pre class="programlisting">CREATE TABLE new_table_name AS &lt;query&gt;;</pre></td></tr><tr><td - valign="top" ><a href="/docs/create-view">CREATE VIEW </a></td><td - valign="top" >Creates a virtual structure for the result set of a stored query.-</td><td -valign="top" ><pre>CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS &lt;query&gt;;</pre></td></tr><tr><td  valign="top" ><a href="/docs/describe">DESCRIBE</a></td><td 
  valign="top" >Returns information about columns in a table or view.</td><td valign="top" ><pre>DESCRIBE [workspace.]table_name|view_name</pre></td></tr><tr><td valign="top" ><a href="/docs/drop-view-command">DROP VIEW</a></td><td valign="top" >Removes a view.</td><td valign="top" ><pre>DROP VIEW [workspace.]view_name ;</pre></td></tr><tr><td  valign="top" ><a href="/docs/explain">EXPLAIN PLAN FOR</a></td><td valign="top" >Returns the physical plan for a particular query.</td><td valign="top" ><pre>EXPLAIN PLAN FOR &lt;query&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/explain">EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR</a></td><td valign="top" >Returns the logical plan for a particular query.</td><td  valign="top" ><pre>EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR &lt;query&gt;;</pre></td></tr><tr><td colspan="1" valign="top" ><a href="/docs/select" rel="nofollow">SELECT</a></td><td valign="top" >Retrieves data from tables and files.</td><td  valign="top" ><pre>[WITH subquery]<
 br />SELECT column_list FROM table_name <br />[WHERE clause]<br />[GROUP BY clause]<br />[HAVING clause]<br />[ORDER BY clause];</pre></td></tr><tr><td  valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW DATABASES </a></td><td valign="top" >Returns a list of available schemas. Equivalent to SHOW SCHEMAS.</td><td valign="top" ><pre>SHOW DATABASES;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-files" >SHOW FILES</a></td><td valign="top" >Returns a list of files in a file system schema.</td><td valign="top" ><pre>SHOW FILES IN filesystem.`schema_name`;<br />SHOW FILES FROM filesystem.`schema_name`;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW SCHEMAS</a></td><td - valign="top" >Returns a list of available schemas. Equivalent to SHOW DATABASES.</td><td valign="top" ><pre>SHOW SCHEMAS;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-tables">SHOW TABLES</a></td><td valign="top" >Returns a list of tables and v
 iews.</td><td valign="top" ><pre>SHOW TABLES;</pre></td></tr><tr><td valign="top" ><a href="/docs/use">USE</a></td><td valign="top" >Change to a particular schema. When you opt to use a particular schema, Drill issues queries on that schema only.</td><td valign="top" ><pre>USE schema_name;</pre></td></tr></table>

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/010-alter-session-command.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/010-alter-session-command.md b/_docs/sql-reference/sql-commands/010-alter-session-command.md
deleted file mode 100644
index c3bdc86..0000000
--- a/_docs/sql-reference/sql-commands/010-alter-session-command.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-title: "ALTER SESSION Command"
-parent: "SQL Commands"
----
-The ALTER SESSION command changes a system setting for the duration of a
-session. Session level settings override system level settings.
-
-## Syntax
-
-The ALTER SESSION command supports the following syntax:
-
-    ALTER SESSION SET `<option_name>`=<value>;
-
-## Parameters
-
-*option_name*  
-This is the option name as it appears in the systems table.
-
-*value*  
-A value of the type listed in the sys.options table: number, string, boolean,
-or float. Use the appropriate value type for each option that you set.
-
-## Usage Notes
-
-Use the ALTER SESSION command to set Drill query planning and execution
-options per session in a cluster. The options that you set using the ALTER
-SESSION command only apply to queries that run during the current Drill
-connection. A session ends when you quit the Drill shell. You can set any of
-the system level options at the session level.
-
-You can run the following query to see a complete list of planning and
-execution options that are currently set at the system or session level:
-
-    0: jdbc:drill:zk=local> SELECT name, type FROM sys.options WHERE type in ('SYSTEM','SESSION') order by name;
-    +------------+----------------------------------------------+
-    |   name                                       |    type    |
-    +----------------------------------------------+------------+
-    | drill.exec.functions.cast_empty_string_to_null | SYSTEM   |
-    | drill.exec.storage.file.partition.column.label | SYSTEM   |
-    | exec.errors.verbose                          | SYSTEM     |
-    | exec.java_compiler                           | SYSTEM     |
-    | exec.java_compiler_debug                     | SYSTEM     |
-    …
-    +------------+----------------------------------------------+
-
-{% include startnote.html %}This is a truncated version of the list.{% include endnote.html %}
-
-## Example
-
-This example demonstrates how to use the ALTER SESSION command to set the
-`store.json.all_text_mode` option to “true” for the current Drill session.
-Setting this option to “true” enables text mode so that Drill reads everything
-in JSON as a text object instead of trying to interpret data types. This
-allows complicated JSON to be read using CASE and CAST.
-
-    0: jdbc:drill:zk=local> alter session set `store.json.all_text_mode`= true;
-    +------------+------------+
-    |   ok  |  summary   |
-    +------------+------------+
-    | true      | store.json.all_text_mode updated. |
-    +------------+------------+
-    1 row selected (0.046 seconds)
-
-You can issue a query to see all of the session level settings. Note that the
-option type is case-sensitive.
-
-    0: jdbc:drill:zk=local> SELECT name, type, bool_val FROM sys.options WHERE type = 'SESSION' order by name;
-    +------------+------------+------------+
-    |   name    |   type    |  bool_val  |
-    +------------+------------+------------+
-    | store.json.all_text_mode | SESSION    | true      |
-    +------------+------------+------------+
-    1 row selected (0.176 seconds)
-

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/010-alter-session.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/010-alter-session.md b/_docs/sql-reference/sql-commands/010-alter-session.md
new file mode 100644
index 0000000..5173b57
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/010-alter-session.md
@@ -0,0 +1,74 @@
+---
+title: "ALTER SESSION"
+parent: "SQL Commands"
+---
+The ALTER SESSION command changes a system setting for the duration of a
+session. Session level settings override system level settings.
+
+## Syntax
+
+The ALTER SESSION command supports the following syntax:
+
+    ALTER SESSION SET `<option_name>`=<value>;
+
+## Parameters
+
+*option_name*  
+This is the option name as it appears in the sys.options table.
+
+*value*  
+A value of the type listed in the sys.options table: number, string, boolean,
+or float. Use the appropriate value type for each option that you set.
+
+## Usage Notes
+
+Use the ALTER SESSION command to set Drill query planning and execution
+options per session in a cluster. The options that you set using the ALTER
+SESSION command only apply to queries that run during the current Drill
+connection. A session ends when you quit the Drill shell. You can set any of
+the system level options at the session level.
+
+You can run the following query to see a complete list of planning and
+execution options that are currently set at the system or session level:
+
+    0: jdbc:drill:zk=local> SELECT name, type FROM sys.options WHERE type in ('SYSTEM','SESSION') order by name;
+    +------------+----------------------------------------------+
+    |   name                                       |    type    |
+    +----------------------------------------------+------------+
+    | drill.exec.functions.cast_empty_string_to_null | SYSTEM   |
+    | drill.exec.storage.file.partition.column.label | SYSTEM   |
+    | exec.errors.verbose                          | SYSTEM     |
+    | exec.java_compiler                           | SYSTEM     |
+    | exec.java_compiler_debug                     | SYSTEM     |
+    …
+    +------------+----------------------------------------------+
+
+{% include startnote.html %}This is a truncated version of the list.{% include endnote.html %}
+
+## Example
+
+This example demonstrates how to use the ALTER SESSION command to set the
+`store.json.all_text_mode` option to “true” for the current Drill session.
+Setting this option to “true” enables text mode so that Drill reads everything
+in JSON as a text object instead of trying to interpret data types. This
+allows complicated JSON to be read using CASE and CAST.
+
+    0: jdbc:drill:zk=local> alter session set `store.json.all_text_mode`= true;
+    +------------+------------+
+    |   ok  |  summary   |
+    +------------+------------+
+    | true      | store.json.all_text_mode updated. |
+    +------------+------------+
+    1 row selected (0.046 seconds)
+
+You can issue a query to see all of the session level settings. Note that the
+option type is case-sensitive.
+
+    0: jdbc:drill:zk=local> SELECT name, type, bool_val FROM sys.options WHERE type = 'SESSION' order by name;
+    +------------+------------+------------+
+    |   name    |   type    |  bool_val  |
+    +------------+------------+------------+
+    | store.json.all_text_mode | SESSION    | true      |
+    +------------+------------+------------+
+    1 row selected (0.176 seconds)
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/020-alter-system.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/020-alter-system.md b/_docs/sql-reference/sql-commands/020-alter-system.md
index a351ac8..cc7a9f1 100644
--- a/_docs/sql-reference/sql-commands/020-alter-system.md
+++ b/_docs/sql-reference/sql-commands/020-alter-system.md
@@ -1,5 +1,5 @@
 ---
-title: "ALTER SYSTEM Command"
+title: "ALTER SYSTEM"
 parent: "SQL Commands"
 ---
 The ALTER SYSTEM command permanently changes a system setting. The new setting

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/030-create-table-as-command.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/030-create-table-as-command.md b/_docs/sql-reference/sql-commands/030-create-table-as-command.md
deleted file mode 100644
index 8e0a4e1..0000000
--- a/_docs/sql-reference/sql-commands/030-create-table-as-command.md
+++ /dev/null
@@ -1,134 +0,0 @@
----
-title: "CREATE TABLE AS (CTAS) Command"
-parent: "SQL Commands"
----
-You can create tables in Drill by using the CTAS command:
-
-    CREATE TABLE new_table_name AS <query>;
-
-where query is any valid Drill query. Each table you create must have a unique
-name. You can include an optional column list for the new table. For example:
-
-    create table logtable(transid, prodid) as select transaction_id, product_id from ...
-
-You can store table data in one of three formats:
-
-  * csv
-  * parquet
-  * json
-
-The parquet and json formats can be used to store complex data.
-
-To set the output format for a Drill table, set the `store.format` option with
-the ALTER SYSTEM or ALTER SESSION command. For example:
-
-    alter session set `store.format`='json';
-
-Table data is stored in the location specified by the workspace that is in use
-when you run the CTAS statement. By default, a directory is created, using the
-exact table name specified in the CTAS statement. A .json, .csv, or .parquet
-file inside that directory contains the data.
-
-You can only create new tables in workspaces. You cannot create tables in
-other storage plugins such as Hive and HBase.
-
-You must use a writable (mutable) workspace when creating Drill tables. For
-example:
-
-	"tmp": {
-	      "location": "/tmp",
-	      "writable": true,
-	       }
-
-## Example
-
-The following query returns one row from a JSON file:
-
-	0: jdbc:drill:zk=local> select id, type, name, ppu
-	from dfs.`/Users/brumsby/drill/donuts.json`;
-	+------------+------------+------------+------------+
-	|     id     |    type    |    name    |    ppu     |
-	+------------+------------+------------+------------+
-	| 0001       | donut      | Cake       | 0.55       |
-	+------------+------------+------------+------------+
-	1 row selected (0.248 seconds)
-
-To create and verify the contents of a table that contains this row:
-
-  1. Set the workspace to a writable workspace.
-  2. Set the `store.format` option appropriately.
-  3. Run a CTAS statement that contains the query.
-  4. Go to the directory where the table is stored and check the contents of the file.
-  5. Run a query against the new table.
-
-The following sqlline output captures this sequence of steps.
-
-### Workspace Definition
-
-	"tmp": {
-	      "location": "/tmp",
-	      "writable": true,
-	       }
-
-### ALTER SESSION Command
-
-    alter session set `store.format`='json';
-
-### USE Command
-
-	0: jdbc:drill:zk=local> use dfs.tmp;
-	+------------+------------+
-	|     ok     |  summary   |
-	+------------+------------+
-	| true       | Default schema changed to 'dfs.tmp' |
-	+------------+------------+
-	1 row selected (0.03 seconds)
-
-### CTAS Command
-
-	0: jdbc:drill:zk=local> create table donuts_json as
-	select id, type, name, ppu from dfs.`/Users/brumsby/drill/donuts.json`;
-	+------------+---------------------------+
-	|  Fragment  | Number of records written |
-	+------------+---------------------------+
-	| 0_0        | 1                         |
-	+------------+---------------------------+
-	1 row selected (0.107 seconds)
-
-### File Contents
-
-	administorsmbp7:tmp brumsby$ pwd
-	/tmp
-	administorsmbp7:tmp brumsby$ cd donuts_json
-	administorsmbp7:donuts_json brumsby$ more 0_0_0.json
-	{
-	 "id" : "0001",
-	  "type" : "donut",
-	  "name" : "Cake",
-	  "ppu" : 0.55
-	}
-
-### Query Against New Table
-
-	0: jdbc:drill:zk=local> select * from donuts_json;
-	+------------+------------+------------+------------+
-	|     id     |    type    |    name    |    ppu     |
-	+------------+------------+------------+------------+
-	| 0001       | donut      | Cake       | 0.55       |
-	+------------+------------+------------+------------+
-	1 row selected (0.053 seconds)
-
-### Use a Different Output Format
-
-You can run the same sequence again with a different storage format set for
-the system or session (csv or parquet). For example, if the format is set to
-csv, and you name the table donuts_csv, the resulting file would look like
-this:
-
-	administorsmbp7:tmp brumsby$ cd donuts_csv
-	administorsmbp7:donuts_csv brumsby$ ls
-	0_0_0.csv
-	administorsmbp7:donuts_csv brumsby$ more 0_0_0.csv
-	id,type,name,ppu
-	0001,donut,Cake,0.55
-

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/030-create-table-as.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/030-create-table-as.md b/_docs/sql-reference/sql-commands/030-create-table-as.md
new file mode 100644
index 0000000..f5ba9d3
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/030-create-table-as.md
@@ -0,0 +1,134 @@
+---
+title: "CREATE TABLE AS (CTAS)"
+parent: "SQL Commands"
+---
+You can create tables in Drill by using the CTAS command:
+
+    CREATE TABLE new_table_name AS <query>;
+
+where query is any valid Drill query. Each table you create must have a unique
+name. You can include an optional column list for the new table. For example:
+
+    create table logtable(transid, prodid) as select transaction_id, product_id from ...
+
+You can store table data in one of three formats:
+
+  * csv
+  * parquet
+  * json
+
+The parquet and json formats can be used to store complex data.
+
+To set the output format for a Drill table, set the `store.format` option with
+the ALTER SYSTEM or ALTER SESSION command. For example:
+
+    alter session set `store.format`='json';
+
+Table data is stored in the location specified by the workspace that is in use
+when you run the CTAS statement. By default, a directory is created, using the
+exact table name specified in the CTAS statement. A .json, .csv, or .parquet
+file inside that directory contains the data.
+
+You can only create new tables in workspaces. You cannot create tables in
+other storage plugins such as Hive and HBase.
+
+You must use a writable (mutable) workspace when creating Drill tables. For
+example:
+
+	"tmp": {
+	      "location": "/tmp",
+	      "writable": true,
+	       }
+
+## Example
+
+The following query returns one row from a JSON file:
+
+	0: jdbc:drill:zk=local> select id, type, name, ppu
+	from dfs.`/Users/brumsby/drill/donuts.json`;
+	+------------+------------+------------+------------+
+	|     id     |    type    |    name    |    ppu     |
+	+------------+------------+------------+------------+
+	| 0001       | donut      | Cake       | 0.55       |
+	+------------+------------+------------+------------+
+	1 row selected (0.248 seconds)
+
+To create and verify the contents of a table that contains this row:
+
+  1. Set the workspace to a writable workspace.
+  2. Set the `store.format` option appropriately.
+  3. Run a CTAS statement that contains the query.
+  4. Go to the directory where the table is stored and check the contents of the file.
+  5. Run a query against the new table.
+
+The following sqlline output captures this sequence of steps.
+
+### Workspace Definition
+
+	"tmp": {
+	      "location": "/tmp",
+	      "writable": true,
+	       }
+
+### ALTER SESSION Command
+
+    alter session set `store.format`='json';
+
+### USE Command
+
+	0: jdbc:drill:zk=local> use dfs.tmp;
+	+------------+------------+
+	|     ok     |  summary   |
+	+------------+------------+
+	| true       | Default schema changed to 'dfs.tmp' |
+	+------------+------------+
+	1 row selected (0.03 seconds)
+
+### CTAS Command
+
+	0: jdbc:drill:zk=local> create table donuts_json as
+	select id, type, name, ppu from dfs.`/Users/brumsby/drill/donuts.json`;
+	+------------+---------------------------+
+	|  Fragment  | Number of records written |
+	+------------+---------------------------+
+	| 0_0        | 1                         |
+	+------------+---------------------------+
+	1 row selected (0.107 seconds)
+
+### File Contents
+
+	administorsmbp7:tmp brumsby$ pwd
+	/tmp
+	administorsmbp7:tmp brumsby$ cd donuts_json
+	administorsmbp7:donuts_json brumsby$ more 0_0_0.json
+	{
+	 "id" : "0001",
+	  "type" : "donut",
+	  "name" : "Cake",
+	  "ppu" : 0.55
+	}
+
+### Query Against New Table
+
+	0: jdbc:drill:zk=local> select * from donuts_json;
+	+------------+------------+------------+------------+
+	|     id     |    type    |    name    |    ppu     |
+	+------------+------------+------------+------------+
+	| 0001       | donut      | Cake       | 0.55       |
+	+------------+------------+------------+------------+
+	1 row selected (0.053 seconds)
+
+### Use a Different Output Format
+
+You can run the same sequence again with a different storage format set for
+the system or session (csv or parquet). For example, if the format is set to
+csv, and you name the table donuts_csv, the resulting file would look like
+this:
+
+	administorsmbp7:tmp brumsby$ cd donuts_csv
+	administorsmbp7:donuts_csv brumsby$ ls
+	0_0_0.csv
+	administorsmbp7:donuts_csv brumsby$ more 0_0_0.csv
+	id,type,name,ppu
+	0001,donut,Cake,0.55
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/050-create-view-command.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/050-create-view-command.md b/_docs/sql-reference/sql-commands/050-create-view-command.md
deleted file mode 100644
index 53cf3b8..0000000
--- a/_docs/sql-reference/sql-commands/050-create-view-command.md
+++ /dev/null
@@ -1,197 +0,0 @@
----
-title: "CREATE VIEW Command"
-parent: "SQL Commands"
----
-The CREATE VIEW command creates a virtual structure for the result set of a
-stored query. A view can combine data from multiple underlying data sources
-and provide the illusion that all of the data is from one source. You can use
-views to protect sensitive data, for data aggregation, and to hide data
-complexity from users. You can create Drill views from files in your local and
-distributed file systems, Hive, HBase, and MapR-DB tables, as well as from
-existing views or any other available storage plugin data sources.
-
-## Syntax
-
-The CREATE VIEW command supports the following syntax:
-
-    CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS <query>;
-
-Use CREATE VIEW to create a new view. Use CREATE OR REPLACE VIEW to replace an
-existing view with the same name. When you replace a view, the query must
-generate the same set of columns with the same column names and data types.
-
-**Note:** Follow Drill’s rules for identifiers when you name the view. See coming soon...
-
-## Parameters
-
-_workspace_  
-The location where you want the view to exist. By default, the view is created
-in the current workspace. See
-[Workspaces]({{ site.baseurl }}/docs/Workspaces).
-
-_view_name_  
-The name that you give the view. The view must have a unique name. It cannot
-have the same name as any other view or table in the workspace.
-
-_column_name_  
-Optional list of column names in the view. If you do not supply column names,
-they are derived from the query.
-
-_query_  
-A SELECT statement that defines the columns and rows in the view.
-
-## Usage Notes
-
-### Storage
-
-Drill stores views in the location specified by the workspace that you use
-when you run the CREATE VIEW command. If the workspace is not defined, Drill
-creates the view in the current workspace. You must use a writable workspace
-when you create a view. Currently, Drill only supports views created in the
-file system or distributed file system.
-
-The following example shows a writable workspace as defined within the storage
-plugin in the `/tmp` directory of the file system:
-
-    "tmp": {
-          "location": "/tmp",
-          "writable": true,
-           }
-
-Drill stores the view definition in JSON format with the name that you specify
-when you run the CREATE VIEW command, suffixed `by .view.drill`. For example,
-if you create a view named `myview`, Drill stores the view in the designated
-workspace as `myview.view.drill`.
-
-Data Sources
-
-Drill considers data sources to have either a strong schema or a weak schema.  
-
-##### Strong Schema
-
-With the exception of text file data sources, Drill verifies that data sources
-associated with a strong schema contain data types compatible with those used
-in the query. Drill also verifies that the columns referenced in the query
-exist in the underlying data sources. If the columns do not exist, CREATE VIEW
-fails.
-
-#### Weak Schema
-
-Drill does not verify that data sources associated with a weak schema contain
-data types compatible with those used in the query. Drill does not verify if
-columns referenced in a query on a Parquet data source exist, therefore CREATE
-VIEW always succeeds. In the case of JSON files, Drill does not verify if the
-files contain the maps specified in the view.
-
-The following table lists the current categories of schema and the data
-sources associated with each:
-
-<table>
-  <tr>
-    <th></th>
-    <th>Strong Schema</th>
-    <th>Weak Schema</th>
-  </tr>
-  <tr>
-    <td valign="top">Data Sources</td>
-    <td>views<br>hive tables<br>hbase column families<br>text</td>
-    <td>json<br>mongodb<br>hbase column qualifiers<br>parquet</td>
-  </tr>
-</table>
-  
-## Related Commands
-
-After you create a view using the CREATE VIEW command, you can issue the
-following commands against the view:
-
-  * SELECT 
-  * DESCRIBE 
-  * DROP 
-
-{% include startnote.html %}You cannot update, insert into, or delete from a view.{% include endnote.html %}
-
-## Example
-
-This example shows you some steps that you can follow when you want to create
-a view in Drill using the CREATE VIEW command. A workspace named “donuts” was
-created for the steps in this example.
-
-Complete the following steps to create a view in Drill:
-
-  1. Decide which workspace you will use to create the view, and verify that the writable option is set to “true.” You can use an existing workspace, or you can create a new workspace. See [Workspaces](https://cwiki.apache.org/confluence/display/DRILL/Workspaces) for more information.  
-  
-        "workspaces": {
-           "donuts": {
-             "location": "/home/donuts",
-             "writable": true,
-             "defaultInputFormat": null
-           }
-         },
-
-  2. Run SHOW DATABASES to verify that Drill recognizes the workspace.  
-
-        0: jdbc:drill:zk=local> show databases;
-        +-------------+
-        | SCHEMA_NAME |
-        +-------------+
-        | dfs.default |
-        | dfs.root  |
-        | dfs.donuts  |
-        | dfs.tmp   |
-        | cp.default  |
-        | sys       |
-        | INFORMATION_SCHEMA |
-        +-------------+
-
-  3. Use the writable workspace.  
-
-        0: jdbc:drill:zk=local> use dfs.donuts;
-        +------------+------------+
-        |     ok    |  summary   |
-        +------------+------------+
-        | true      | Default schema changed to 'dfs.donuts' |
-        +------------+------------+
-
-  4. Test run the query that you plan to use with the CREATE VIEW command.  
-
-        0: jdbc:drill:zk=local> select id, type, name, ppu from `donuts.json`;
-        +------------+------------+------------+------------+
-        |     id    |   type    |   name    |    ppu    |
-        +------------+------------+------------+------------+
-        | 0001      | donut      | Cake     | 0.55      |
-        +------------+------------+------------+------------+
-
-  5. Run the CREATE VIEW command with the query.  
-
-        0: jdbc:drill:zk=local> create view mydonuts as select id, type, name, ppu from `donuts.json`;
-        +------------+------------+
-        |     ok    |  summary   |
-        +------------+------------+
-        | true      | View 'mydonuts' created successfully in 'dfs.donuts' schema |
-        +------------+------------+
-
-  6. Create a new view in another workspace from the current workspace.  
-
-        0: jdbc:drill:zk=local> create view dfs.tmp.yourdonuts as select id, type, name from `donuts.json`;
-        +------------+------------+
-        |   ok  |  summary   |
-        +------------+------------+
-        | true      | View 'yourdonuts' created successfully in 'dfs.tmp' schema |
-        +------------+------------+
-
-  7. Query the view created in both workspaces.
-
-        0: jdbc:drill:zk=local> select * from mydonuts;
-        +------------+------------+------------+------------+
-        |     id    |   type    |   name    |    ppu    |
-        +------------+------------+------------+------------+
-        | 0001      | donut      | Cake     | 0.55      |
-        +------------+------------+------------+------------+
-         
-         
-        0: jdbc:drill:zk=local> select * from dfs.tmp.yourdonuts;
-        +------------+------------+------------+
-        |   id  |   type    |   name    |
-        +------------+------------+------------+
-        | 0001      | donut     | Cake      |
-        +------------+------------+------------+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/050-create-view.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/050-create-view.md b/_docs/sql-reference/sql-commands/050-create-view.md
new file mode 100644
index 0000000..4d71148
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/050-create-view.md
@@ -0,0 +1,197 @@
+---
+title: "CREATE VIEW"
+parent: "SQL Commands"
+---
+The CREATE VIEW command creates a virtual structure for the result set of a
+stored query. A view can combine data from multiple underlying data sources
+and provide the illusion that all of the data is from one source. You can use
+views to protect sensitive data, for data aggregation, and to hide data
+complexity from users. You can create Drill views from files in your local and
+distributed file systems, Hive, HBase, and MapR-DB tables, as well as from
+existing views or any other available storage plugin data sources.
+
+## Syntax
+
+The CREATE VIEW command supports the following syntax:
+
+    CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS <query>;
+
+Use CREATE VIEW to create a new view. Use CREATE OR REPLACE VIEW to replace an
+existing view with the same name. When you replace a view, the query must
+generate the same set of columns with the same column names and data types.
+
+**Note:** Follow Drill’s rules for identifiers when you name the view. (Documentation on identifier rules is coming soon.)
+
+## Parameters
+
+_workspace_  
+The location where you want the view to exist. By default, the view is created
+in the current workspace. See
+[Workspaces]({{ site.baseurl }}/docs/Workspaces).
+
+_view_name_  
+The name that you give the view. The view must have a unique name. It cannot
+have the same name as any other view or table in the workspace.
+
+_column_name_  
+Optional list of column names in the view. If you do not supply column names,
+they are derived from the query.
+
+_query_  
+A SELECT statement that defines the columns and rows in the view.
+
+## Usage Notes
+
+### Storage
+
+Drill stores views in the location specified by the workspace that you use
+when you run the CREATE VIEW command. If the workspace is not defined, Drill
+creates the view in the current workspace. You must use a writable workspace
+when you create a view. Currently, Drill only supports views created in the
+file system or distributed file system.
+
+The following example shows a writable workspace as defined within the storage
+plugin in the `/tmp` directory of the file system:
+
+    "tmp": {
+          "location": "/tmp",
+          "writable": true,
+           }
+
+Drill stores the view definition in JSON format with the name that you specify
+when you run the CREATE VIEW command, suffixed by `.view.drill`. For example,
+if you create a view named `myview`, Drill stores the view in the designated
+workspace as `myview.view.drill`.
+
+### Data Sources
+
+Drill considers data sources to have either a strong schema or a weak schema.  
+
+#### Strong Schema
+
+With the exception of text file data sources, Drill verifies that data sources
+associated with a strong schema contain data types compatible with those used
+in the query. Drill also verifies that the columns referenced in the query
+exist in the underlying data sources. If the columns do not exist, CREATE VIEW
+fails.
+
+#### Weak Schema
+
+Drill does not verify that data sources associated with a weak schema contain
+data types compatible with those used in the query. Drill does not verify if
+columns referenced in a query on a Parquet data source exist, therefore CREATE
+VIEW always succeeds. In the case of JSON files, Drill does not verify if the
+files contain the maps specified in the view.
+
+The following table lists the current categories of schema and the data
+sources associated with each:
+
+<table>
+  <tr>
+    <th></th>
+    <th>Strong Schema</th>
+    <th>Weak Schema</th>
+  </tr>
+  <tr>
+    <td valign="top">Data Sources</td>
+    <td>views<br>hive tables<br>hbase column families<br>text</td>
+    <td>json<br>mongodb<br>hbase column qualifiers<br>parquet</td>
+  </tr>
+</table>
+  
+## Related Commands
+
+After you create a view using the CREATE VIEW command, you can issue the
+following commands against the view:
+
+  * SELECT 
+  * DESCRIBE 
+  * DROP 
+
+{% include startnote.html %}You cannot update, insert into, or delete from a view.{% include endnote.html %}
+
+## Example
+
+This example shows you some steps that you can follow when you want to create
+a view in Drill using the CREATE VIEW command. A workspace named “donuts” was
+created for the steps in this example.
+
+Complete the following steps to create a view in Drill:
+
+  1. Decide which workspace you will use to create the view, and verify that the writable option is set to “true.” You can use an existing workspace, or you can create a new workspace. See [Workspaces](https://cwiki.apache.org/confluence/display/DRILL/Workspaces) for more information.  
+  
+        "workspaces": {
+           "donuts": {
+             "location": "/home/donuts",
+             "writable": true,
+             "defaultInputFormat": null
+           }
+         },
+
+  2. Run SHOW DATABASES to verify that Drill recognizes the workspace.  
+
+        0: jdbc:drill:zk=local> show databases;
+        +-------------+
+        | SCHEMA_NAME |
+        +-------------+
+        | dfs.default |
+        | dfs.root  |
+        | dfs.donuts  |
+        | dfs.tmp   |
+        | cp.default  |
+        | sys       |
+        | INFORMATION_SCHEMA |
+        +-------------+
+
+  3. Use the writable workspace.  
+
+        0: jdbc:drill:zk=local> use dfs.donuts;
+        +------------+------------+
+        |     ok    |  summary   |
+        +------------+------------+
+        | true      | Default schema changed to 'dfs.donuts' |
+        +------------+------------+
+
+  4. Test run the query that you plan to use with the CREATE VIEW command.  
+
+        0: jdbc:drill:zk=local> select id, type, name, ppu from `donuts.json`;
+        +------------+------------+------------+------------+
+        |     id    |   type    |   name    |    ppu    |
+        +------------+------------+------------+------------+
+        | 0001      | donut      | Cake     | 0.55      |
+        +------------+------------+------------+------------+
+
+  5. Run the CREATE VIEW command with the query.  
+
+        0: jdbc:drill:zk=local> create view mydonuts as select id, type, name, ppu from `donuts.json`;
+        +------------+------------+
+        |     ok    |  summary   |
+        +------------+------------+
+        | true      | View 'mydonuts' created successfully in 'dfs.donuts' schema |
+        +------------+------------+
+
+  6. Create a new view in another workspace from the current workspace.  
+
+        0: jdbc:drill:zk=local> create view dfs.tmp.yourdonuts as select id, type, name from `donuts.json`;
+        +------------+------------+
+        |   ok  |  summary   |
+        +------------+------------+
+        | true      | View 'yourdonuts' created successfully in 'dfs.tmp' schema |
+        +------------+------------+
+
+  7. Query the view created in both workspaces.
+
+        0: jdbc:drill:zk=local> select * from mydonuts;
+        +------------+------------+------------+------------+
+        |     id    |   type    |   name    |    ppu    |
+        +------------+------------+------------+------------+
+        | 0001      | donut      | Cake     | 0.55      |
+        +------------+------------+------------+------------+
+         
+         
+        0: jdbc:drill:zk=local> select * from dfs.tmp.yourdonuts;
+        +------------+------------+------------+
+        |   id  |   type    |   name    |
+        +------------+------------+------------+
+        | 0001      | donut     | Cake      |
+        +------------+------------+------------+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/055-drop-view.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/055-drop-view.md b/_docs/sql-reference/sql-commands/055-drop-view.md
new file mode 100644
index 0000000..4e8b477
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/055-drop-view.md
@@ -0,0 +1,47 @@
+---
+title: "DROP VIEW"
+parent: "SQL Commands"
+---
+
+The DROP VIEW command removes a view that was created in a workspace using the CREATE VIEW command.
+
+## Syntax
+
+The DROP VIEW command supports the following syntax:
+
+     DROP VIEW [workspace.]view_name;
+
+## Usage Notes
+
+When you drop a view, all information about the view is deleted from the workspace in which it was created. DROP VIEW applies to the view only, not to the underlying data sources used to create the view. However, if you drop a view that another view is dependent on, you can no longer use the dependent view. If the underlying tables or views change after a view is created, you may want to drop and re-create the view. Alternatively, you can use the CREATE OR REPLACE VIEW syntax to update the view.
+
+## Example
+
+This example shows you some steps to follow when you want to drop a view in Drill using the DROP VIEW command. A workspace named “donuts” was created for the steps in this example.
+Complete the following steps to drop a view in Drill:
+First, use the writable workspace from which the view was created.
+
+    0: jdbc:drill:zk=local> use dfs.donuts;
+    +------------+------------+
+    |     ok    |  summary   |
+    +------------+------------+
+    | true      | Default schema changed to 'dfs.donuts' |
+    +------------+------------+
+ 
+Use the DROP VIEW command to remove a view created in the current workspace.
+
+    0: jdbc:drill:zk=local> drop view mydonuts;
+    +------------+------------+
+    |     ok    |  summary   |
+    +------------+------------+
+    | true      | View 'mydonuts' deleted successfully from 'dfs.donuts' schema |
+    +------------+------------+
+
+Use the DROP VIEW command to remove a view created in another workspace.
+
+    0: jdbc:drill:zk=local> drop view dfs.tmp.yourdonuts;
+    +------------+------------+
+    |   ok  |  summary   |
+    +------------+------------+
+    | true      | View 'yourdonuts' deleted successfully from 'dfs.tmp' schema |
+    +------------+------------+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/060-describe-command.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/060-describe-command.md b/_docs/sql-reference/sql-commands/060-describe-command.md
deleted file mode 100644
index 349f0ef..0000000
--- a/_docs/sql-reference/sql-commands/060-describe-command.md
+++ /dev/null
@@ -1,99 +0,0 @@
----
-title: "DESCRIBE Command"
-parent: "SQL Commands"
----
-The DESCRIBE command returns information about columns in a table or view.
-
-## Syntax
-
-The DESCRIBE command supports the following syntax:
-
-    DESCRIBE [workspace.]table_name|view_name
-
-## Usage Notes
-
-You can issue the DESCRIBE command against views created in a workspace and
-tables created in Hive, HBase, and MapR-DB. You can issue the DESCRIBE command
-on a table or view from any schema. For example, if you are working in the
-`dfs.myworkspace` schema, you can issue the DESCRIBE command on a view or
-table in another schema. Currently, DESCRIBE does not support tables created
-in a file system.
-
-Drill only supports SQL data types. Verify that all data types in an external
-data source, such as Hive or HBase, map to supported data types in Drill. See
-Drill Data Type Mapping for more information.
-
-## Example
-
-The following example demonstrates the steps that you can follow when you want
-to use the DESCRIBE command to see column information for a view and for Hive
-and HBase tables.
-
-Complete the following steps to use the DESCRIBE command:
-
-  1. Issue the USE command to switch to a particular schema.
-
-        0: jdbc:drill:zk=drilldemo:5181> use hive;
-        +------------+------------+
-        |   ok  |  summary   |
-        +------------+------------+
-        | true      | Default schema changed to 'hive' |
-        +------------+------------+
-        1 row selected (0.025 seconds)
-
-  2. Issue the SHOW TABLES command to see the existing tables in the schema.
-
-        0: jdbc:drill:zk=drilldemo:5181> show tables;
-        +--------------+------------+
-        | TABLE_SCHEMA | TABLE_NAME |
-        +--------------+------------+
-        | hive.default | orders     |
-        | hive.default | products   |
-        +--------------+------------+
-        2 rows selected (0.438 seconds)
-
-  3. Issue the DESCRIBE command on a table.
-
-        0: jdbc:drill:zk=drilldemo:5181> describe orders;
-        +-------------+------------+-------------+
-        | COLUMN_NAME | DATA_TYPE  | IS_NULLABLE |
-        +-------------+------------+-------------+
-        | order_id  | BIGINT    | YES       |
-        | month     | VARCHAR   | YES       |
-        | purchdate   | TIMESTAMP  | YES        |
-        | cust_id   | BIGINT    | YES       |
-        | state     | VARCHAR   | YES       |
-        | prod_id   | BIGINT    | YES       |
-        | order_total | INTEGER | YES       |
-        +-------------+------------+-------------+
-        7 rows selected (0.64 seconds)
-
-  4. Issue the DESCRIBE command on a table in another schema from the current schema.
-
-        0: jdbc:drill:zk=drilldemo:5181> describe hbase.customers;
-        +-------------+------------+-------------+
-        | COLUMN_NAME | DATA_TYPE  | IS_NULLABLE |
-        +-------------+------------+-------------+
-        | row_key   | ANY       | NO        |
-        | address   | (VARCHAR(1), ANY) MAP | NO        |
-        | loyalty   | (VARCHAR(1), ANY) MAP | NO        |
-        | personal  | (VARCHAR(1), ANY) MAP | NO        |
-        +-------------+------------+-------------+
-        4 rows selected (0.671 seconds)
-
-  5. Issue the DESCRIBE command on a view in another schema from the current schema.
-
-        0: jdbc:drill:zk=drilldemo:5181> describe dfs.views.customers_vw;
-        +-------------+------------+-------------+
-        | COLUMN_NAME | DATA_TYPE  | IS_NULLABLE |
-        +-------------+------------+-------------+
-        | cust_id   | BIGINT    | NO        |
-        | name      | VARCHAR   | NO        |
-        | address   | VARCHAR   | NO        |
-        | gender    | VARCHAR   | NO        |
-        | age       | VARCHAR   | NO        |
-        | agg_rev   | VARCHAR   | NO        |
-        | membership  | VARCHAR | NO        |
-        +-------------+------------+-------------+
-        7 rows selected (0.403 seconds)
-

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/060-describe.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/060-describe.md b/_docs/sql-reference/sql-commands/060-describe.md
new file mode 100644
index 0000000..6623c8f
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/060-describe.md
@@ -0,0 +1,99 @@
+---
+title: "DESCRIBE"
+parent: "SQL Commands"
+---
+The DESCRIBE command returns information about columns in a table or view.
+
+## Syntax
+
+The DESCRIBE command supports the following syntax:
+
+    DESCRIBE [workspace.]table_name|view_name
+
+## Usage Notes
+
+You can issue the DESCRIBE command against views created in a workspace and
+tables created in Hive, HBase, and MapR-DB. You can issue the DESCRIBE command
+on a table or view from any schema. For example, if you are working in the
+`dfs.myworkspace` schema, you can issue the DESCRIBE command on a view or
+table in another schema. Currently, DESCRIBE does not support tables created
+in a file system.
+
+Drill only supports SQL data types. Verify that all data types in an external
+data source, such as Hive or HBase, map to supported data types in Drill. See
+Drill Data Type Mapping for more information.
+
+## Example
+
+The following example demonstrates the steps that you can follow when you want
+to use the DESCRIBE command to see column information for a view and for Hive
+and HBase tables.
+
+Complete the following steps to use the DESCRIBE command:
+
+  1. Issue the USE command to switch to a particular schema.
+
+        0: jdbc:drill:zk=drilldemo:5181> use hive;
+        +------------+------------+
+        |   ok  |  summary   |
+        +------------+------------+
+        | true      | Default schema changed to 'hive' |
+        +------------+------------+
+        1 row selected (0.025 seconds)
+
+  2. Issue the SHOW TABLES command to see the existing tables in the schema.
+
+        0: jdbc:drill:zk=drilldemo:5181> show tables;
+        +--------------+------------+
+        | TABLE_SCHEMA | TABLE_NAME |
+        +--------------+------------+
+        | hive.default | orders     |
+        | hive.default | products   |
+        +--------------+------------+
+        2 rows selected (0.438 seconds)
+
+  3. Issue the DESCRIBE command on a table.
+
+        0: jdbc:drill:zk=drilldemo:5181> describe orders;
+        +-------------+------------+-------------+
+        | COLUMN_NAME | DATA_TYPE  | IS_NULLABLE |
+        +-------------+------------+-------------+
+        | order_id  | BIGINT    | YES       |
+        | month     | VARCHAR   | YES       |
+        | purchdate   | TIMESTAMP  | YES        |
+        | cust_id   | BIGINT    | YES       |
+        | state     | VARCHAR   | YES       |
+        | prod_id   | BIGINT    | YES       |
+        | order_total | INTEGER | YES       |
+        +-------------+------------+-------------+
+        7 rows selected (0.64 seconds)
+
+  4. Issue the DESCRIBE command on a table in another schema from the current schema.
+
+        0: jdbc:drill:zk=drilldemo:5181> describe hbase.customers;
+        +-------------+------------+-------------+
+        | COLUMN_NAME | DATA_TYPE  | IS_NULLABLE |
+        +-------------+------------+-------------+
+        | row_key   | ANY       | NO        |
+        | address   | (VARCHAR(1), ANY) MAP | NO        |
+        | loyalty   | (VARCHAR(1), ANY) MAP | NO        |
+        | personal  | (VARCHAR(1), ANY) MAP | NO        |
+        +-------------+------------+-------------+
+        4 rows selected (0.671 seconds)
+
+  5. Issue the DESCRIBE command on a view in another schema from the current schema.
+
+        0: jdbc:drill:zk=drilldemo:5181> describe dfs.views.customers_vw;
+        +-------------+------------+-------------+
+        | COLUMN_NAME | DATA_TYPE  | IS_NULLABLE |
+        +-------------+------------+-------------+
+        | cust_id   | BIGINT    | NO        |
+        | name      | VARCHAR   | NO        |
+        | address   | VARCHAR   | NO        |
+        | gender    | VARCHAR   | NO        |
+        | age       | VARCHAR   | NO        |
+        | agg_rev   | VARCHAR   | NO        |
+        | membership  | VARCHAR | NO        |
+        +-------------+------------+-------------+
+        7 rows selected (0.403 seconds)
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/070-explain-commands.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/070-explain-commands.md b/_docs/sql-reference/sql-commands/070-explain-commands.md
deleted file mode 100644
index acf0825..0000000
--- a/_docs/sql-reference/sql-commands/070-explain-commands.md
+++ /dev/null
@@ -1,156 +0,0 @@
----
-title: "EXPLAIN Commands"
-parent: "SQL Commands"
----
-EXPLAIN is a useful tool for examining the steps that a query goes through
-when it is executed. You can use the EXPLAIN output to gain a deeper
-understanding of the parallel processing that Drill queries exploit. You can
-also look at costing information, troubleshoot performance issues, and
-diagnose routine errors that may occur when you run queries.
-
-Drill provides two variations on the EXPLAIN command, one that returns the
-physical plan and one that returns the logical plan. A logical plan takes the
-SQL query (as written by the user and accepted by the parser) and translates
-it into a logical series of operations that correspond to SQL language
-constructs (without defining the specific algorithms that will be implemented
-to run the query). A physical plan translates the logical plan into a specific
-series of steps that will be used when the query runs. For example, a logical
-plan may indicate a join step in general and classify it as inner or outer,
-but the corresponding physical plan will indicate the specific type of join
-operator that will run, such as a merge join or a hash join. The physical plan
-is operational and reveals the specific _access methods_ that will be used for
-the query.
-
-An EXPLAIN command for a query that is run repeatedly under the exact same
-conditions against the same data will return the same plan. However, if you
-change a configuration option, for example, or update the tables or files that
-you are selecting from, you are likely to see plan changes.
-
-## EXPLAIN Syntax
-
-The EXPLAIN command supports the following syntax:
-
-    explain plan [ including all attributes ] [ with implementation | without implementation ] for <query> ;
-
-where `query` is any valid SELECT statement supported by Drill.
-
-##### INCLUDING ALL ATTRIBUTES
-
-This option returns costing information. You can use this option for both
-physical and logical plans.
-
-#### WITH IMPLEMENTATION | WITHOUT IMPLEMENTATION
-
-These options return the physical and logical plan information, respectively.
-The default is physical (WITH IMPLEMENTATION).
-
-## EXPLAIN for Physical Plans
-
-The EXPLAIN PLAN FOR <query> command returns the chosen physical execution
-plan for a query statement without running the query. You can use this command
-to see what kind of execution operators Drill implements. For example, you can
-find out what kind of join algorithm is chosen when tables or files are
-joined. You can also use this command to analyze errors and troubleshoot
-queries that do not run. For example, if you run into a casting error, the
-query plan text may help you isolate the problem.
-
-Use the following syntax:
-
-    explain plan for <query> ;
-
-The following set command increases the default text display (number of
-characters). By default, most of the plan output is not displayed.
-
-    0: jdbc:drill:zk=local> !set maxwidth 10000
-
-Do not use a semicolon to terminate set commands.
-
-For example, here is the top portion of the explain output for a
-COUNT(DISTINCT) query on a JSON file:
-
-    0: jdbc:drill:zk=local> !set maxwidth 10000
-	0: jdbc:drill:zk=local> explain plan for select type t, count(distinct id) from dfs.`/home/donuts/donuts.json` where type='donut' group by type;
-	+------------+------------+
-	|   text    |   json    |
-	+------------+------------+
-	| 00-00 Screen
-	00-01   Project(t=[$0], EXPR$1=[$1])
-	00-02       Project(t=[$0], EXPR$1=[$1])
-	00-03       HashAgg(group=[{0}], EXPR$1=[COUNT($1)])
-	00-04           HashAgg(group=[{0, 1}])
-	00-05           SelectionVectorRemover
-	00-06               Filter(condition=[=($0, 'donut')])
-	00-07               Scan(groupscan=[EasyGroupScan [selectionRoot=/home/donuts/donuts.json, numFiles=1, columns=[`type`, `id`], files=[file:/home/donuts/donuts.json]]])...
-	...
-
-Read the text output from bottom to top to understand the sequence of
-operators that will execute the query. Note that the physical plan starts with
-a scan of the JSON file that is being queried. The selected columns are
-projected and filtered, then the aggregate function is applied.
-
-The EXPLAIN text output is followed by detailed JSON output, which is reusable
-for submitting the query via Drill APIs.
-
-	| {
-	  "head" : {
-	    "version" : 1,
-	    "generator" : {
-	      "type" : "ExplainHandler",
-	      "info" : ""
-	    },
-	    "type" : "APACHE_DRILL_PHYSICAL",
-	    "options" : [ ],
-	    "queue" : 0,
-	    "resultMode" : "EXEC"
-	  },
-	....
-
-## Costing Information
-
-Add the INCLUDING ALL ATTRIBUTES option to the EXPLAIN command to see cost
-estimates for the query plan. For example:
-
-	0: jdbc:drill:zk=local> !set maxwidth 10000
-	0: jdbc:drill:zk=local> explain plan including all attributes for select * from dfs.`/home/donuts/donuts.json` where type='donut';
-	+------------+------------+
-	|   text    |   json    |
-	+------------+------------+
-	| 00-00 Screen: rowcount = 1.0, cumulative cost = {5.1 rows, 21.1 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 889
-	00-01   Project(*=[$0]): rowcount = 1.0, cumulative cost = {5.0 rows, 21.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 888
-	00-02       Project(T1¦¦*=[$0]): rowcount = 1.0, cumulative cost = {4.0 rows, 17.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 887
-	00-03       SelectionVectorRemover: rowcount = 1.0, cumulative cost = {3.0 rows, 13.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 886
-	00-04           Filter(condition=[=($1, 'donut')]): rowcount = 1.0, cumulative cost = {2.0 rows, 12.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 885
-	00-05           Project(T1¦¦*=[$0], type=[$1]): rowcount = 1.0, cumulative cost = {1.0 rows, 8.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 884
-	00-06               Scan(groupscan=[EasyGroupScan [selectionRoot=/home/donuts/donuts.json, numFiles=1, columns=[`*`], files=[file:/home/donuts/donuts.json]]]): rowcount = 1.0, cumulative cost = {0.0 rows, 0.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 883
-
-## EXPLAIN for Logical Plans
-
-To return the logical plan for a query (again, without actually running the
-query), use the EXPLAIN PLAN WITHOUT IMPLEMENTATION syntax:
-
-    explain plan without implementation for <query> ;
-
-For example:
-
-	0: jdbc:drill:zk=local> explain plan without implementation for select type t, count(distinct id) from dfs.`/home/donuts/donuts.json` where type='donut' group by type;
-	+------------+------------+
-	|   text    |   json    |
-	+------------+------------+
-	| DrillScreenRel
-	  DrillProjectRel(t=[$0], EXPR$1=[$1])
-	    DrillAggregateRel(group=[{0}], EXPR$1=[COUNT($1)])
-	    DrillAggregateRel(group=[{0, 1}])
-	        DrillFilterRel(condition=[=($0, 'donut')])
-	        DrillScanRel(table=[[dfs, /home/donuts/donuts.json]], groupscan=[EasyGroupScan [selectionRoot=/home/donuts/donuts.json, numFiles=1, columns=[`type`, `id`], files=[file:/home/donuts/donuts.json]]]) | {
-	  | {
-	  "head" : {
-	    "version" : 1,
-	    "generator" : {
-	    "type" : "org.apache.drill.exec.planner.logical.DrillImplementor",
-	    "info" : ""
-	    },
-	    "type" : "APACHE_DRILL_LOGICAL",
-	    "options" : null,
-	    "queue" : 0,
-	    "resultMode" : "LOGICAL"
-	  },...

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/070-explain.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/070-explain.md b/_docs/sql-reference/sql-commands/070-explain.md
new file mode 100644
index 0000000..c5347a9
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/070-explain.md
@@ -0,0 +1,156 @@
+---
+title: "EXPLAIN"
+parent: "SQL Commands"
+---
+EXPLAIN is a useful tool for examining the steps that a query goes through
+when it is executed. You can use the EXPLAIN output to gain a deeper
+understanding of the parallel processing that Drill queries exploit. You can
+also look at costing information, troubleshoot performance issues, and
+diagnose routine errors that may occur when you run queries.
+
+Drill provides two variations on the EXPLAIN command, one that returns the
+physical plan and one that returns the logical plan. A logical plan takes the
+SQL query (as written by the user and accepted by the parser) and translates
+it into a logical series of operations that correspond to SQL language
+constructs (without defining the specific algorithms that will be implemented
+to run the query). A physical plan translates the logical plan into a specific
+series of steps that will be used when the query runs. For example, a logical
+plan may indicate a join step in general and classify it as inner or outer,
+but the corresponding physical plan will indicate the specific type of join
+operator that will run, such as a merge join or a hash join. The physical plan
+is operational and reveals the specific _access methods_ that will be used for
+the query.
+
+An EXPLAIN command for a query that is run repeatedly under the exact same
+conditions against the same data will return the same plan. However, if you
+change a configuration option, for example, or update the tables or files that
+you are selecting from, you are likely to see plan changes.
+
+## EXPLAIN Syntax
+
+The EXPLAIN command supports the following syntax:
+
+    explain plan [ including all attributes ] [ with implementation | without implementation ] for <query> ;
+
+where `query` is any valid SELECT statement supported by Drill.
+
+#### INCLUDING ALL ATTRIBUTES
+
+This option returns costing information. You can use this option for both
+physical and logical plans.
+
+#### WITH IMPLEMENTATION | WITHOUT IMPLEMENTATION
+
+These options return the physical and logical plan information, respectively.
+The default is physical (WITH IMPLEMENTATION).
+
+## EXPLAIN for Physical Plans
+
+The EXPLAIN PLAN FOR `<query>` command returns the chosen physical execution
+plan for a query statement without running the query. You can use this command
+to see what kind of execution operators Drill implements. For example, you can
+find out what kind of join algorithm is chosen when tables or files are
+joined. You can also use this command to analyze errors and troubleshoot
+queries that do not run. For example, if you run into a casting error, the
+query plan text may help you isolate the problem.
+
+Use the following syntax:
+
+    explain plan for <query> ;
+
+The following set command increases the default text display (number of
+characters). By default, most of the plan output is not displayed.
+
+    0: jdbc:drill:zk=local> !set maxwidth 10000
+
+Do not use a semicolon to terminate set commands.
+
+For example, here is the top portion of the explain output for a
+COUNT(DISTINCT) query on a JSON file:
+
+    0: jdbc:drill:zk=local> !set maxwidth 10000
+	0: jdbc:drill:zk=local> explain plan for select type t, count(distinct id) from dfs.`/home/donuts/donuts.json` where type='donut' group by type;
+	+------------+------------+
+	|   text    |   json    |
+	+------------+------------+
+	| 00-00 Screen
+	00-01   Project(t=[$0], EXPR$1=[$1])
+	00-02       Project(t=[$0], EXPR$1=[$1])
+	00-03       HashAgg(group=[{0}], EXPR$1=[COUNT($1)])
+	00-04           HashAgg(group=[{0, 1}])
+	00-05           SelectionVectorRemover
+	00-06               Filter(condition=[=($0, 'donut')])
+	00-07               Scan(groupscan=[EasyGroupScan [selectionRoot=/home/donuts/donuts.json, numFiles=1, columns=[`type`, `id`], files=[file:/home/donuts/donuts.json]]])...
+	...
+
+Read the text output from bottom to top to understand the sequence of
+operators that will execute the query. Note that the physical plan starts with
+a scan of the JSON file that is being queried. The selected columns are
+projected and filtered, then the aggregate function is applied.
+
+The EXPLAIN text output is followed by detailed JSON output, which is reusable
+for submitting the query via Drill APIs.
+
+	| {
+	  "head" : {
+	    "version" : 1,
+	    "generator" : {
+	      "type" : "ExplainHandler",
+	      "info" : ""
+	    },
+	    "type" : "APACHE_DRILL_PHYSICAL",
+	    "options" : [ ],
+	    "queue" : 0,
+	    "resultMode" : "EXEC"
+	  },
+	....
+
+## Costing Information
+
+Add the INCLUDING ALL ATTRIBUTES option to the EXPLAIN command to see cost
+estimates for the query plan. For example:
+
+	0: jdbc:drill:zk=local> !set maxwidth 10000
+	0: jdbc:drill:zk=local> explain plan including all attributes for select * from dfs.`/home/donuts/donuts.json` where type='donut';
+	+------------+------------+
+	|   text    |   json    |
+	+------------+------------+
+	| 00-00 Screen: rowcount = 1.0, cumulative cost = {5.1 rows, 21.1 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 889
+	00-01   Project(*=[$0]): rowcount = 1.0, cumulative cost = {5.0 rows, 21.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 888
+	00-02       Project(T1¦¦*=[$0]): rowcount = 1.0, cumulative cost = {4.0 rows, 17.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 887
+	00-03       SelectionVectorRemover: rowcount = 1.0, cumulative cost = {3.0 rows, 13.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 886
+	00-04           Filter(condition=[=($1, 'donut')]): rowcount = 1.0, cumulative cost = {2.0 rows, 12.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 885
+	00-05           Project(T1¦¦*=[$0], type=[$1]): rowcount = 1.0, cumulative cost = {1.0 rows, 8.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 884
+	00-06               Scan(groupscan=[EasyGroupScan [selectionRoot=/home/donuts/donuts.json, numFiles=1, columns=[`*`], files=[file:/home/donuts/donuts.json]]]): rowcount = 1.0, cumulative cost = {0.0 rows, 0.0 cpu, 0.0 io, 0.0 network, 0.0 memory}, id = 883
+
+## EXPLAIN for Logical Plans
+
+To return the logical plan for a query (again, without actually running the
+query), use the EXPLAIN PLAN WITHOUT IMPLEMENTATION syntax:
+
+    explain plan without implementation for <query> ;
+
+For example:
+
+	0: jdbc:drill:zk=local> explain plan without implementation for select type t, count(distinct id) from dfs.`/home/donuts/donuts.json` where type='donut' group by type;
+	+------------+------------+
+	|   text    |   json    |
+	+------------+------------+
+	| DrillScreenRel
+	  DrillProjectRel(t=[$0], EXPR$1=[$1])
+	    DrillAggregateRel(group=[{0}], EXPR$1=[COUNT($1)])
+	    DrillAggregateRel(group=[{0, 1}])
+	        DrillFilterRel(condition=[=($0, 'donut')])
+	        DrillScanRel(table=[[dfs, /home/donuts/donuts.json]], groupscan=[EasyGroupScan [selectionRoot=/home/donuts/donuts.json, numFiles=1, columns=[`type`, `id`], files=[file:/home/donuts/donuts.json]]]) | {
+	  | {
+	  "head" : {
+	    "version" : 1,
+	    "generator" : {
+	    "type" : "org.apache.drill.exec.planner.logical.DrillImplementor",
+	    "info" : ""
+	    },
+	    "type" : "APACHE_DRILL_LOGICAL",
+	    "options" : null,
+	    "queue" : 0,
+	    "resultMode" : "LOGICAL"
+	  },...

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/080-select.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/080-select.md b/_docs/sql-reference/sql-commands/080-select.md
index 4fb9f5e..5ee5e25 100644
--- a/_docs/sql-reference/sql-commands/080-select.md
+++ b/_docs/sql-reference/sql-commands/080-select.md
@@ -1,5 +1,5 @@
 ---
-title: "SELECT Statements"
+title: "SELECT"
 parent: "SQL Commands"
 ---
 Drill supports the following ANSI standard clauses in the SELECT statement:
@@ -83,96 +83,3 @@ all return Boolean results.
 In general, correlated subqueries are supported. EXISTS and NOT EXISTS
 subqueries that do not contain a correlation join are not yet supported.
 
-## WITH Clause
-
-The WITH clause is an optional clause used to contain one or more common table
-expressions (CTE) where each CTE defines a temporary table that exists for the
-duration of the query. Each subquery in the WITH clause specifies a table
-name, an optional list of column names, and a SELECT statement.
-
-## Syntax
-
-The WITH clause supports the following syntax:
-
-    [ WITH with_subquery [, ...] ]
-    where with_subquery is:
-    with_subquery_table_name [ ( column_name [, ...] ) ] AS ( query ) 
-
-## Parameters
-
-_with_subquery_table_name_
-
-A unique name for a temporary table that defines the results of a WITH clause
-subquery. You cannot use duplicate names within a single WITH clause. You must
-give each subquery a table name that can be referenced in the FROM clause.
-
-_column_name_
-
-An optional list of output column names for the WITH clause subquery,
-separated by commas. The number of column names specified must be equal to or
-less than the number of columns defined by the subquery.
-
-_query_
-
-Any SELECT query that Drill supports. See
-[SELECT]({{ site.baseurl }}/docs/SELECT+Statements).
-
-## Usage Notes
-
-Use the WITH clause to efficiently define temporary tables that Drill can
-access throughout the execution of a single query. The WITH clause is
-typically a simpler alternative to using subqueries in the main body of the
-SELECT statement. In some cases, Drill can evaluate a WITH subquery once and
-reuse the results for query optimization.
-
-You can use a WITH clause in the following SQL statements:
-
-  * SELECT (including subqueries within SELECT statements)
-
-  * CREATE TABLE AS
-
-  * CREATE VIEW
-
-  * EXPLAIN
-
-You can reference the temporary tables in the FROM clause of the query. If the
-FROM clause does not reference any tables defined by the WITH clause, Drill
-ignores the WITH clause and executes the query as normal.
-
-Drill can only reference a table defined by a WITH clause subquery in the
-scope of the SELECT query that the WITH clause begins. For example, you can
-reference such a table in the FROM clause of a subquery in the SELECT list,
-WHERE clause, or HAVING clause. You cannot use a WITH clause in a subquery and
-reference its table in the FROM clause of the main query or another subquery.
-
-You cannot specify another WITH clause inside a WITH clause subquery.
-
-For example, the following query includes a forward reference to table t2 in
-the definition of table t1:
-
-## Example
-
-The following example shows the WITH clause used to create a WITH query named
-`emp_data` that selects all of the rows from the `employee.json` file. The
-main query selects the `full_name, position_title, salary`, and `hire_date`
-rows from the `emp_data` temporary table (created from the WITH subquery) and
-orders the results by the hire date. The `emp_data` table only exists for the
-duration of the query.
-
-**Note:** The `employee.json` file is included with the Drill installation. It is located in the `cp.default` workspace which is configured by default. 
-
-    0: jdbc:drill:zk=local> with emp_data as (select * from cp.`employee.json`) select full_name, position_title, salary, hire_date from emp_data order by hire_date limit 10;
-    +------------------+-------------------------+------------+-----------------------+
-    | full_name        | position_title          |   salary   | hire_date             |
-    +------------------+-------------------------+------------+-----------------------+
-    | Bunny McCown     | Store Assistant Manager | 8000.0     | 1993-05-01 00:00:00.0 |
-    | Danielle Johnson | Store Assistant Manager | 8000.0     | 1993-05-01 00:00:00.0 |
-    | Dick Brummer     | Store Assistant Manager | 7900.0     | 1993-05-01 00:00:00.0 |
-    | Gregory Whiting  | Store Assistant Manager | 10000.0    | 1993-05-01 00:00:00.0 |
-    | Juanita Sharp    | HQ Human Resources      | 6700.0     | 1994-01-01 00:00:00.0 |
-    | Sheri Nowmer     | President               | 80000.0    | 1994-12-01 00:00:00.0 |
-    | Rebecca Kanagaki | VP Human Resources      | 15000.0    | 1994-12-01 00:00:00.0 |
-    | Shauna Wyro      | Store Manager           | 15000.0    | 1994-12-01 00:00:00.0 |
-    | Roberta Damstra  | VP Information Systems  | 25000.0    | 1994-12-01 00:00:00.0 |
-    | Pedro Castillo   | VP Country Manager      | 35000.0    | 1994-12-01 00:00:00.0 |
-    +------------+----------------+--------------+------------------------------------+
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/081-select-from.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/081-select-from.md b/_docs/sql-reference/sql-commands/081-select-from.md
new file mode 100644
index 0000000..acaaa87
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/081-select-from.md
@@ -0,0 +1,87 @@
+---
+title: "SELECT FROM"
+parent: "SQL Commands"
+---
+The FROM clause lists the references (tables, views, and subqueries) that data is selected from. Drill expands the traditional concept of a “table reference” in a standard SQL FROM clause to refer to files and directories in a local or distributed file system.
+
+## Syntax
+The FROM clause supports the following syntax:
+
+       ... FROM table_expression [, ...]
+
+## Parameters
+*table_expression* 
+
+Includes one or more *table_references* and is typically followed by the WHERE, GROUP BY, ORDER BY, or HAVING clause. 
+
+*table_reference*
+
+       with_subquery_table_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]
+       table_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]
+       ( subquery ) [ AS ] alias [ ( column_alias [, ...] ) ]
+       table_reference [ ON join_condition ]
+
+   * *with\_subquery\_table_name*
+
+       A table defined by a subquery in the WITH clause.
+
+
+  * *table_name* 
+  
+    Name of a table or view. In Drill, you can also refer to a file system directory or a specific file.
+
+   * *alias* 
+
+    A temporary alternative name for a table or view that provides a convenient shortcut for identifying tables in other parts of a query, such as the WHERE clause. You must supply an alias for a table derived from a subquery. In other table references, aliases are optional. The AS keyword is always optional. Drill does not support using aliases in the GROUP BY clause.
+
+   * *column_alias*  
+     
+    A temporary alternative name for a column in a table or view.
+
+   * *subquery*  
+  
+     A query expression that evaluates to a table. The table exists only for the duration of the query and is typically given a name or alias, though an alias is not required. You can also define column names for tables that derive from subqueries. Naming column aliases is important when you want to join the results of subqueries to other tables and when you want to select or constrain those columns elsewhere in the query. A subquery may contain an ORDER BY clause, but this clause may have no effect if a LIMIT or OFFSET clause is not also specified.
+
+   * *join_type*  
+ 
+    Specifies one of the following join types: 
+
+       [INNER] JOIN  
+       LEFT [OUTER] JOIN  
+       RIGHT [OUTER] JOIN  
+       FULL [OUTER] JOIN
+
+   * *ON join_condition*  
+
+       A type of join specification where the joining columns are stated as a condition that follows the ON keyword.  
+       Example:  
+      ` homes join listing on homes.listid=listing.listid and homes.homeid=listing.homeid`
+
+## Join Types
+INNER JOIN  
+
+Return matching rows only, based on the join condition or list of joining columns.  
+
+OUTER JOIN 
+
+Return all of the rows that the equivalent inner join would return plus non-matching rows from the "left" table, "right" table, or both tables. The left table is the first-listed table, and the right table is the second-listed table. The non-matching rows contain NULL values to fill the gaps in the output columns.
+
+## Usage Notes  
+   * Joined columns must have comparable data types.
+   * A join with the ON syntax retains both joining columns in its intermediate result set.
+
+
+## Examples
+The following query uses a workspace named `dfs.views` and joins a view named “custview” with a hive table named “orders” to determine sales for each membership type:
+
+       0: jdbc:drill:> select membership, sum(order_total) as sales from hive.orders, custview
+       where orders.cust_id=custview.cust_id
+       group by membership order by 2;
+       +------------+------------+
+       | membership |   sales    |
+       +------------+------------+
+       | "basic"    | 380665     |
+       | "silver"   | 708438     |
+       | "gold"     | 2787682    |
+       +------------+------------+
+       3 rows selected

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/082-select-group-by.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/082-select-group-by.md b/_docs/sql-reference/sql-commands/082-select-group-by.md
new file mode 100644
index 0000000..a1f799c
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/082-select-group-by.md
@@ -0,0 +1,51 @@
+---
+title: "SELECT GROUP BY"
+parent: "SQL Commands"
+---
+The GROUP BY clause identifies the grouping columns for the query. You typically use a GROUP BY clause in conjunction with an aggregate expression. Grouping columns must be declared when the query computes aggregates with standard functions such as SUM, AVG, and COUNT. Currently, Drill does not support grouping on aliases.
+
+
+## Syntax
+The GROUP BY clause supports the following syntax:  
+
+
+    GROUP BY expression [, ...]
+  
+
+## Parameters  
+*column_name*  
+
+Must be a column from the current scope of the query. For example, if a GROUP BY clause is in a subquery, it cannot refer to columns in the outer query.
+
+*expression*  
+
+The list of columns or expressions must match the list of non-aggregate expressions in the select list of the query.
+
+
+## Usage Notes
+*SelectItems* in the SELECT statement with a GROUP BY clause can only contain aggregates or grouping columns.
+
+
+## Examples
+The following query returns sales totals grouped by month:  
+
+       0: jdbc:drill:> select `month`, sum(order_total)
+       from orders group by `month` order by 2 desc;
+       +------------+------------+
+       | month      | EXPR$1     |
+       +------------+------------+
+       | June       | 950481     |
+       | May        | 947796     |
+       | March      | 836809     |
+       | April      | 807291     |
+       | July       | 757395     |
+       | October    | 676236     |
+       | August     | 572269     |
+       | February   | 532901     |
+       | September  | 373100     |
+       | January    | 346536     |
+       +------------+------------+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/083-select-having.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/083-select-having.md b/_docs/sql-reference/sql-commands/083-select-having.md
new file mode 100644
index 0000000..76828c5
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/083-select-having.md
@@ -0,0 +1,51 @@
+---
+title: "SELECT HAVING"
+parent: "SQL Commands"
+---
+The HAVING clause filters group rows created by the GROUP BY clause. The HAVING clause is applied to each group of the grouped table, much as a WHERE clause is applied to a select list. If there is no GROUP BY clause, the HAVING clause is applied to the entire result as a single group. The SELECT list cannot refer directly to any column that is not included in the GROUP BY clause, unless the column appears within an aggregate function.
+
+## Syntax
+The HAVING clause supports the following syntax:  
+
+`[ HAVING  boolean_expression ]`  
+
+## Expression  
+A *boolean expression* can include one or more of the following operators:  
+
+  * AND
+  * OR
+  * NOT
+  * IS NULL
+  * IS NOT NULL
+  * LIKE 
+  * BETWEEN
+  * IN
+  * Comparison operators
+  * Quantified comparison operators  
+
+## Usage Notes
+  * Any column referenced in a HAVING clause must be either a grouping column or a column that refers to the result of an aggregate function.
+  * In a HAVING clause, you cannot specify:
+   * An alias that was defined in the select list. You must repeat the original, unaliased expression. 
+   * An ordinal number that refers to a select list item. Only the GROUP BY and ORDER BY clauses accept ordinal numbers.
+
+## Examples
+The following example query uses the HAVING clause to constrain an aggregate result. Drill queries the `dfs.clicks workspace` and  returns the total number of clicks for devices that indicate high click-throughs:  
+
+       0: jdbc:drill:> select t.user_info.device, count(*) from `clicks/clicks.json` t 
+       group by t.user_info.device having count(*) > 1000;  
+       
+       +------------+------------+
+       |   EXPR$0   |   EXPR$1   |
+       +------------+------------+
+       | IOS5       | 11814      |
+       | AOS4.2     | 5986       |
+       | IOS6       | 4464       |
+       | IOS7       | 3135       |
+       | AOS4.4     | 1562       |
+       | AOS4.3     | 3039       |
+       +------------+------------+  
+
+The aggregate is a count of the records for each different mobile device in the clickstream data. Only the activity for the devices that registered more than 1000 transactions qualify for the result set.
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/9700ff63/_docs/sql-reference/sql-commands/084-select-limit.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/084-select-limit.md b/_docs/sql-reference/sql-commands/084-select-limit.md
new file mode 100644
index 0000000..9a1d6b5
--- /dev/null
+++ b/_docs/sql-reference/sql-commands/084-select-limit.md
@@ -0,0 +1,51 @@
+---
+title: "SELECT LIMIT"
+parent: "SQL Commands"
+---
+The LIMIT clause limits the result set to the specified number of rows. You can use LIMIT with or without an ORDER BY clause.
+
+
+## Syntax
+The LIMIT clause supports the following syntax:  
+
+       [ LIMIT { count | ALL } ]
+
+Specifying ALL returns all records, which is equivalent to omitting the LIMIT clause from the SELECT statement.
+
+## Parameters
+*count*  
+
+Specifies the maximum number of rows to return.
+If the count expression evaluates to NULL, Drill treats it as LIMIT ALL. 
+
+## Examples
+The following example query includes the ORDER BY and LIMIT clauses and returns the top 20 sales totals by month and state:  
+
+       0: jdbc:drill:> select `month`, state, sum(order_total) as sales from orders group by `month`, state
+       order by 3 desc limit 20;
+       +------------+------------+------------+
+       |   month    |   state    |   sales    |
+       +------------+------------+------------+
+       | May        | ca         | 119586     |
+       | June       | ca         | 116322     |
+       | April      | ca         | 101363     |
+       | March      | ca         | 99540      |
+       | July       | ca         | 90285      |
+       | October    | ca         | 80090      |
+       | June       | tx         | 78363      |
+       | May        | tx         | 77247      |
+       | March      | tx         | 73815      |
+       | August     | ca         | 71255      |
+       | April      | tx         | 68385      |
+       | July       | tx         | 63858      |
+       | February   | ca         | 63527      |
+       | June       | fl         | 62199      |
+       | June       | ny         | 62052      |
+       | May        | fl         | 61651      |
+       | May        | ny         | 59369      |
+       | October    | tx         | 55076      |
+       | March      | fl         | 54867      |
+       | March      | ny         | 52101      |
+       +------------+------------+------------+
+       20 rows selected
+


[18/31] drill git commit: correct step 4

Posted by ts...@apache.org.
correct step 4


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7c95f5ca
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7c95f5ca
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7c95f5ca

Branch: refs/heads/gh-pages
Commit: 7c95f5ca73d33947515d3aab5177f3dbe8b5c40e
Parents: 8921367
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 07:41:11 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 07:41:11 2015 -0700

----------------------------------------------------------------------
 .../050-starting-drill-on-windows.md                             | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/7c95f5ca/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
index 027b052..8538c84 100644
--- a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
@@ -8,9 +8,7 @@ Start the Drill shell using the **sqlline command**. The `zk=local` means the lo
 2. Go to the bin directory.
 2. Open Command Prompt and type the following command on the command line:
    ``sqlline.bat -u "jdbc:drill:zk=local"``
-3. At the sqlline> prompt, type `!connect jdbc:drill:zk=local` and then press Enter:
-   ![sqlline]({{ site.baseurl }}/docs/img/sqlline1.png)
-4. Enter the username, `admin`, and password, also `admin` when prompted.
+3. Enter the username, `admin`, and password, also `admin` when prompted.
    The `0: jdbc:drill:zk=local>` prompt appears.
 At this point, you can [submit queries]({{ site.baseurl }}/docs/drill-in-10-minutes#query-sample-data) to Drill.
 


[02/31] drill git commit: coordinate with Bridget's draft of Perf Tuning

Posted by ts...@apache.org.
coordinate with Bridget's draft of Perf Tuning


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/64910ec6
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/64910ec6
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/64910ec6

Branch: refs/heads/gh-pages
Commit: 64910ec60b5446533da460be9896fa74cef87cd0
Parents: 6c87ff0
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 11:23:57 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 11:23:57 2015 -0700

----------------------------------------------------------------------
 ...guring-a-multitenant-cluster-introduction.md |  4 +-
 .../050-configuring-multitenant-resources.md    |  6 ++-
 .../060-configuring-a-shared-drillbit.md        | 32 ++++++++--------
 .../010-configuration-options-introduction.md   |  8 ++--
 .../030-planning-and-exececution-options.md     | 40 +-------------------
 5 files changed, 29 insertions(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/64910ec6/_docs/configure-drill/030-configuring-a-multitenant-cluster-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/030-configuring-a-multitenant-cluster-introduction.md b/_docs/configure-drill/030-configuring-a-multitenant-cluster-introduction.md
index 80edfc8..c6a272f 100644
--- a/_docs/configure-drill/030-configuring-a-multitenant-cluster-introduction.md
+++ b/_docs/configure-drill/030-configuring-a-multitenant-cluster-introduction.md
@@ -15,6 +15,6 @@ You need to plan and configure the following resources for use with Drill and ot
 
 * [Memory]({{site.baseurl}}/docs/configuring-multitenant-resources)  
 * [CPU]({{site.baseurl}}/docs/configuring-multitenant-resources#how-to-manage-drill-cpu-resources)  
-* Disk  
+* [Disk]({{site.baseurl}}/docs/configuring-multitenant-resources#how-to-manage-disk-resources) 
 
-When users share a Drillbit, [configure queues]({{site.baseurl}}/docs/configuring-resources-for-a-shared-drillbit#configuring-query-queuing) and [parallelization]({{site.baseurl}}/docs/configuring-resources-for-a-shared-drillbit#configuring-parallelization) in addition to memory.
\ No newline at end of file
+When users share a Drillbit, [configure queues]({{site.baseurl}}/docs/configuring-resources-for-a-shared-drillbit#configuring-query-queuing) and [parallelization]({{site.baseurl}}/docs/configuring-resources-for-a-shared-drillbit#configuring-parallelization) in addition to memory. 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/64910ec6/_docs/configure-drill/050-configuring-multitenant-resources.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/050-configuring-multitenant-resources.md b/_docs/configure-drill/050-configuring-multitenant-resources.md
index fcdab5c..c54f430 100644
--- a/_docs/configure-drill/050-configuring-multitenant-resources.md
+++ b/_docs/configure-drill/050-configuring-multitenant-resources.md
@@ -33,4 +33,8 @@ Configure NodeManager and ResourceManager to reconfigure the total memory requir
 Modify MapReduce memory to suit your application needs. Remaining memory is typically given to YARN applications. 
 
 ## How to Manage Drill CPU Resources
-Currently, you do not manage CPU resources within Drill. [Use Linux `cgroups`](http://en.wikipedia.org/wiki/Cgroups) to manage the CPU resources.
\ No newline at end of file
+Currently, you do not manage CPU resources within Drill. [Use Linux `cgroups`](http://en.wikipedia.org/wiki/Cgroups) to manage the CPU resources.
+
+## How to Manage Disk Resources
+
+The `planner.add_producer_consumer` system option enables or disables a secondary reading thread that works out of band of the rest of the scanning fragment to prefetch data from disk. If you interact with a certain type of storage medium that is slow or does not prefetch much data, this option tells Drill to add a producer consumer reading thread to the operation. Drill can then assign one thread that focuses on a single reading fragment. If Drill is using memory, you can disable this option to get better performance. If Drill is using disk space, you should enable this option and set a reasonable queue size for the `planner.producer_consumer_queue_size` option. For more information about these options, see the section,  "Performance Tuning".
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/64910ec6/_docs/configure-drill/060-configuring-a-shared-drillbit.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/060-configuring-a-shared-drillbit.md b/_docs/configure-drill/060-configuring-a-shared-drillbit.md
index 52e3db4..4e47213 100644
--- a/_docs/configure-drill/060-configuring-a-shared-drillbit.md
+++ b/_docs/configure-drill/060-configuring-a-shared-drillbit.md
@@ -11,20 +11,7 @@ Set [options in sys.options]({{site.baseurl}}/docs/configuration-options-introdu
 * exec.queue.large  
 * exec.queue.small  
 
-### Example Configuration
-
-For example, you configure the queue reserved for large queries for a 5-query maximum. You configure the queue reserved for small queries for 20 queries. Users start to run queries, and Drill receives the following query requests in this order:
-
-* Query A (blue): 1 billion records, Drill estimates 10 million rows will be processed  
-* Query B (red): 2 billion records, Drill estimates 20 million rows will be processed  
-* Query C: 1 billion records  
-* Query D: 100 records
-
-The exec.queue.threshold default is 30 million, which is the estimated rows to be processed by the query. Queries A and B are queued in the large queue. The estimated rows to be processed reaches the 30 million threshold, filling the queue to capacity. The query C request arrives and goes on the wait list, and then query D arrives. Query D is queued immediately in the small queue because of its small size, as shown in the following diagram: 
-
-![drill queuing]({{ site.baseurl }}/docs/img/queuing.png)
-
-The Drill queuing configuration in this example tends to give many users running small queries a rapid response. Users running a large query might experience some delay until an earlier-received large query returns, freeing space in the large queue to process queries that are waiting.
+For more information, see the section, "Performance Tuning".
 
 ## Configuring Parallelization
 
@@ -39,7 +26,22 @@ To configure parallelization, configure the following options in the `sys.option
 * `planner.width.max.per.query`  
   Same as max per node but applies to the query as executed by the entire cluster.
 
-Configure the `planner.width.max.per.node` to achieve fine grained, absolute control over parallelization. 
+### planner.width.max_per_node
+Configure the `planner.width.max.per.node` to achieve fine grained, absolute control over parallelization. In this context *width* refers to fanout or distribution potential: the ability to run a query in parallel across the cores on a node and the nodes on a cluster. A physical plan consists of intermediate operations, known as query &quot;fragments,&quot; that run concurrently, yielding opportunities for parallelism above and below each exchange operator in the plan. An exchange operator represents a breakpoint in the execution flow where processing can be distributed. For example, a single-process scan of a file may flow into an exchange operator, followed by a multi-process aggregation fragment.
+
+The maximum width per node defines the maximum degree of parallelism for any fragment of a query, but the setting applies at the level of a single node in the cluster. The *default* maximum degree of parallelism per node is calculated as follows, with the theoretical maximum automatically scaled back (and rounded down) so that only 70% of the actual available capacity is taken into account: number of active drillbits (typically one per node) * number of cores per node * 0.7
+
+For example, on a single-node test system with 2 cores and hyper-threading enabled: 1 * 4 * 0.7 = 3
+
+When you modify the default setting, you can supply any meaningful number. The system does not automatically scale down your setting.
+
+### planner.width.max_per_query
+
+The max_per_query value also sets the maximum degree of parallelism for any given stage of a query, but the setting applies to the query as executed by the whole cluster (multiple nodes). In effect, the actual maximum width per query is the *minimum of two values*: min((number of nodes * width.max_per_node), width.max_per_query)
+
+For example, on a 4-node cluster where `width.max_per_node` is set to 6 and `width.max_per_query` is set to 30: min((4 * 6), 30) = 24
+
+In this case, the effective maximum width per query is 24, not 30.
 
 <!-- ??For example, setting the `planner.width.max.per.query` to 60 will not accelerate Drill operations because overlapping does not occur when executing 60 queries at the same time.??
 

http://git-wip-us.apache.org/repos/asf/drill/blob/64910ec6/_docs/configure-drill/configuration-options/010-configuration-options-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/configuration-options/010-configuration-options-introduction.md b/_docs/configure-drill/configuration-options/010-configuration-options-introduction.md
index ee2ff9e..bdd19f3 100644
--- a/_docs/configure-drill/configuration-options/010-configuration-options-introduction.md
+++ b/_docs/configure-drill/configuration-options/010-configuration-options-introduction.md
@@ -22,8 +22,8 @@ The sys.options table lists the following options that you can set as a system o
 | exec.java_compiler                             | DEFAULT          | Switches between DEFAULT, JDK, and JANINO mode for the current session. Uses Janino by default for generated source code of less than exec.java_compiler_janino_maxsize; otherwise, switches to the JDK compiler.                                                                                                                                                |
 | exec.java_compiler_debug                       | TRUE             | Toggles the output of debug-level compiler error messages in runtime generated code.                                                                                                                                                                                                                                                                             |
 | exec.java_compiler_janino_maxsize              | 262144           | See the exec.java_compiler option comment. Accepts inputs of type LONG.                                                                                                                                                                                                                                                                                          |
-| exec.max_hash_table_size                       | 1073741824       | Ending size for hash tables. Range: 0 - 1073741824. For internal use.                                                                                                                                                                                                                                                                                            |
-| exec.min_hash_table_size                       | 65536            | Starting size for hash tables. Increase according to available memory to improve performance. Range: 0 - 1073741824. For internal use.                                                                                                                                                                                                                           |
+| exec.max_hash_table_size                       | 1073741824       | Ending size for hash tables. Range: 0 - 1073741824.                                                                                                                                                                                                                                                                                                              |
+| exec.min_hash_table_size                       | 65536            | Starting size for hash tables. Increase according to available memory to improve performance. Increase for very large aggregations or joins when you have large amounts of memory for Drill to use. Range: 0 - 1073741824.                                                                                                                                       |
 | exec.queue.enable                              | FALSE            | Changes the state of query queues to control the number of queries that run simultaneously.                                                                                                                                                                                                                                                                      |
 | exec.queue.large                               | 10               | Sets the number of large queries that can run concurrently in the cluster. Range: 0-1000                                                                                                                                                                                                                                                                         |
 | exec.queue.small                               | 100              | Sets the number of small queries that can run concurrently in the cluster. Range: 0-1001                                                                                                                                                                                                                                                                         |
@@ -65,13 +65,13 @@ The sys.options table lists the following options that you can set as a system o
 | planner.partitioner_sender_max_threads         | 8                | Upper limit of threads for outbound queuing.                                                                                                                                                                                                                                                                                                                     |
 | planner.partitioner_sender_set_threads         | -1               | Overwrites the number of threads used to send out batches of records. Set to -1 to disable. Typically not changed.                                                                                                                                                                                                                                               |
 | planner.partitioner_sender_threads_factor      | 2                | A heuristic param to use to influence final number of threads. The higher the value the fewer the number of threads.                                                                                                                                                                                                                                             |
-| planner.producer_consumer_queue_size           | 10               | How much data to prefetch from disk in record batches out-of-band of query execution.                                                                                                                                                                                                                                                                            |
+| planner.producer_consumer_queue_size           | 10               | How much data to prefetch from disk in record batches out-of-band of query execution. The larger the queue size, the greater the amount of memory that the queue and overall query execution consumes.                                                                                                                                                           |
 | planner.slice_target                           | 100000           | The number of records manipulated within a fragment before Drill parallelizes operations.                                                                                                                                                                                                                                                                        |
 | planner.width.max_per_node                     | 3                | Maximum number of threads that can run in parallel for a query on a node. A slice is an individual thread. This number indicates the maximum number of slices per query for the query’s major fragment on a node.                                                                                                                                                |
 | planner.width.max_per_query                    | 1000             | Same as max per node but applies to the query as executed by the entire cluster. For example, this value might be the number of active Drillbits, or a higher number to return results faster.                                                                                                                                                                   |
 | store.format                                   | parquet          | Output format for data written to tables with the CREATE TABLE AS (CTAS) command. Allowed values are parquet, json, or text. Allowed values: 0, -1, 1000000                                                                                                                                                                                                      |
 | store.json.all_text_mode                       | FALSE            | Drill reads all data from the JSON files as VARCHAR. Prevents schema change errors.                                                                                                                                                                                                                                                                              |
-| store.json.extended_types                      | FALSE            | Turns on special JSON structures that Drill serializes for storing more type information than the [four basic JSON types] (http://docs.mongodb.org/manual/reference/mongodb-extended-json/).                                                                                                                                                                     |
+| store.json.extended_types                      | FALSE            | Turns on special JSON structures that Drill serializes for storing more type information than the [four basic JSON types](http://docs.mongodb.org/manual/reference/mongodb-extended-json/).                                                                                                                                                                      |
 | store.json.read_numbers_as_double              | FALSE            | Reads numbers with or without a decimal point as DOUBLE. Prevents schema change errors.                                                                                                                                                                                                                                                                          |
 | store.mongo.all_text_mode                      | FALSE            | Similar to store.json.all_text_mode for MongoDB.                                                                                                                                                                                                                                                                                                                 |
 | store.mongo.read_numbers_as_double             | FALSE            | Similar to store.json.read_numbers_as_double.                                                                                                                                                                                                                                                                                                                    |

http://git-wip-us.apache.org/repos/asf/drill/blob/64910ec6/_docs/configure-drill/configuration-options/030-planning-and-exececution-options.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/configuration-options/030-planning-and-exececution-options.md b/_docs/configure-drill/configuration-options/030-planning-and-exececution-options.md
index 2608538..d1c9a30 100644
--- a/_docs/configure-drill/configuration-options/030-planning-and-exececution-options.md
+++ b/_docs/configure-drill/configuration-options/030-planning-and-exececution-options.md
@@ -14,42 +14,4 @@ Use the ALTER SYSTEM or ALTER SESSION commands to set options. Typically,
 you set the options at the session level unless you want the setting to
 persist across all sessions.
 
-The summary of system options lists default values. The following descriptions provide more detail on some of these options:
-
-### exec.min_hash_table_size
-
-The default starting size for hash tables. Increasing this size is useful for very large aggregations or joins when you have large amounts of memory for Drill to use. Drill can spend a lot of time resizing the hash table as it finds new data. If you have large data sets, you can increase this hash table size to increase performance.
-
-### planner.add_producer_consumer
-
-This option enables or disables a secondary reading thread that works out of band of the rest of the scanning fragment to prefetch data from disk. If you interact with a certain type of storage medium that is slow or does not prefetch much data, this option tells Drill to add a producer consumer reading thread to the operation. Drill can then assign one thread that focuses on a single reading fragment. If Drill is using memory, you can disable this option to get better performance. If Drill is using disk space, you should enable this option and set a reasonable queue size for the planner.producer_consumer_queue_size option.
-
-### planner.broadcast_threshold
-
-Threshold, in terms of a number of rows, that determines whether a broadcast join is chosen for a query. Regardless of the setting of the broadcast_join option (enabled or disabled), a broadcast join is not chosen unless the right side of the join is estimated to contain fewer rows than this threshold. The intent of this option is to avoid broadcasting too many rows for join purposes. Broadcasting involves sending data across nodes and is a network-intensive operation. (The &quot;right side&quot; of the join, which may itself be a join or simply a table, is determined by cost-based optimizations and heuristics during physical planning.)
-
-### planner.enable_broadcast_join, planner.enable_hashagg, planner.enable_hashjoin, planner.enable_mergejoin, planner.enable_multiphase_agg, planner.enable_streamagg
-
-These options enable or disable specific aggregation and join operators for queries. These operators are all enabled by default and in general should not be disabled.</p><p>Hash aggregation and hash join are hash-based operations. Streaming aggregation and merge join are sort-based operations. Both hash-based and sort-based operations consume memory; however, currently, hash-based operations do not spill to disk as needed, but the sort-based operations do. If large hash operations do not fit in memory on your system, you may need to disable these operations. Queries will continue to run, using alternative plans.
-
-### planner.producer_consumer_queue_size
-
-Determines how much data to prefetch from disk (in record batches) out of band of query execution. The larger the queue size, the greater the amount of memory that the queue and overall query execution consumes.
-
-### planner.width.max_per_node
-
-In this context *width* refers to fanout or distribution potential: the ability to run a query in parallel across the cores on a node and the nodes on a cluster. A physical plan consists of intermediate operations, known as query &quot;fragments,&quot; that run concurrently, yielding opportunities for parallelism above and below each exchange operator in the plan. An exchange operator represents a breakpoint in the execution flow where processing can be distributed. For example, a single-process scan of a file may flow into an exchange operator, followed by a multi-process aggregation fragment.
-
-The maximum width per node defines the maximum degree of parallelism for any fragment of a query, but the setting applies at the level of a single node in the cluster. The *default* maximum degree of parallelism per node is calculated as follows, with the theoretical maximum automatically scaled back (and rounded down) so that only 70% of the actual available capacity is taken into account: number of active drillbits (typically one per node) * number of cores per node * 0.7
-
-For example, on a single-node test system with 2 cores and hyper-threading enabled: 1 * 4 * 0.7 = 3
-
-When you modify the default setting, you can supply any meaningful number. The system does not automatically scale down your setting.
-
-### planner.width.max_per_query
-
-The max_per_query value also sets the maximum degree of parallelism for any given stage of a query, but the setting applies to the query as executed by the whole cluster (multiple nodes). In effect, the actual maximum width per query is the *minimum of two values*: min((number of nodes * width.max_per_node), width.max_per_query)
-
-For example, on a 4-node cluster where `width.max_per_node` is set to 6 and `width.max_per_query` is set to 30: min((4 * 6), 30) = 24
-
-In this case, the effective maximum width per query is 24, not 30.
\ No newline at end of file
+The [summary of system options]({{site.baseurl}}/docs/configuration-options-introduction) lists default values and a short description of the planning and execution options. The planning option names have a planning preface. Execution options have an exec preface. For more information, see the section, "Performance Tuning". The following descriptions provide more detail on some of these options:


[31/31] drill git commit: Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages

Posted by ts...@apache.org.
Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/3237426b
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/3237426b
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/3237426b

Branch: refs/heads/gh-pages
Commit: 3237426be3951699e62c8e9a0a3f90545a02671d
Parents: 7d66334 134b6ff
Author: Tomer Shiran <ts...@gmail.com>
Authored: Mon May 18 16:34:58 2015 -0700
Committer: Tomer Shiran <ts...@gmail.com>
Committed: Mon May 18 16:34:58 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 1944 ++++++++++++++----
 _docs/073-log-and-debug.md                      |    3 +
 .../070-configuring-user-impersonation.md       |    2 +-
 .../090-mongodb-plugin-for-apache-drill.md      |   26 +-
 ...ata-sources-and-file-formats-introduction.md |    2 +-
 .../030-deploying-and-using-a-hive-udf.md       |    2 +-
 .../040-parquet-format.md                       |    4 +-
 .../050-json-data-model.md                      |   16 +-
 .../020-develop-a-simple-function.md            |    4 +-
 .../030-developing-an-aggregate-function.md     |    4 +-
 _docs/getting-started/010-drill-introduction.md |    2 +-
 _docs/img/18.png                                |  Bin 22253 -> 18137 bytes
 _docs/img/sqlline1.png                          |  Bin 6633 -> 10413 bytes
 .../030-starting-drill-on-linux-and-mac-os-x.md |    2 +-
 .../040-installing-drill-on-windows.md          |    2 +-
 .../050-starting-drill-on-windows.md            |   19 +-
 .../001-log-and-debug-introduction.md           |   15 +
 _docs/log-and-debug/002-error-messages.md       |   25 +
 _docs/log-and-debug/003-modify-logback.xml.md   |   54 +
 .../004-review-the-java-stack-trace.md          |   41 +
 .../020-using-jdbc-with-squirrel-on-windows.md  |  164 ++
 _docs/odbc-jdbc-interfaces/020-using-jdbc.md    |  164 --
 ...microstrategy-analytics-with-apache-drill.md |    4 +-
 _docs/query-data/010-query-data-introduction.md |   14 +-
 _docs/query-data/030-querying-hbase.md          |   41 +-
 _docs/query-data/050-querying-hive.md           |    2 +-
 .../060-querying-the-information-schema.md      |    2 +-
 _docs/query-data/070-query-sys-tbl.md           |   89 +-
 .../010-querying-json-files.md                  |   33 +-
 .../020-querying-parquet-files.md               |  100 +-
 .../030-querying-plain-text-files.md            |   72 +-
 .../040-querying-directories.md                 |   34 +-
 .../005-querying-complex-data-introduction.md   |    4 +-
 _docs/sql-reference/040-operators.md            |    2 +-
 _docs/sql-reference/090-sql-extensions.md       |    8 +-
 .../data-types/010-supported-data-types.md      |   20 +-
 .../nested-data-functions/010-flatten.md        |    8 +-
 .../nested-data-functions/020-kvgen.md          |    5 +-
 .../sql-commands/005-supported-sql-commands.md  |    2 +-
 .../sql-commands/010-alter-session-command.md   |   74 -
 .../sql-commands/010-alter-session.md           |   74 +
 .../sql-commands/020-alter-system.md            |    2 +-
 .../sql-commands/030-create-table-as-command.md |  134 --
 .../sql-commands/030-create-table-as.md         |  134 ++
 .../sql-commands/050-create-view-command.md     |  197 --
 .../sql-commands/050-create-view.md             |  197 ++
 .../sql-reference/sql-commands/055-drop-view.md |   47 +
 .../sql-commands/060-describe-command.md        |   99 -
 .../sql-reference/sql-commands/060-describe.md  |   99 +
 .../sql-commands/070-explain-commands.md        |  156 --
 _docs/sql-reference/sql-commands/070-explain.md |  156 ++
 _docs/sql-reference/sql-commands/080-select.md  |   95 +-
 .../sql-commands/081-select-from.md             |   87 +
 .../sql-commands/082-select-group-by.md         |   51 +
 .../sql-commands/083-select-having.md           |   51 +
 .../sql-commands/084-select-limit.md            |   51 +
 .../sql-commands/085-select-offset.md           |   29 +
 .../sql-commands/086-select-order-by.md         |   71 +
 .../sql-commands/087-select-union.md            |   42 +
 .../sql-commands/088-select-where.md            |   59 +
 .../sql-commands/089-select-with.md             |   95 +
 .../090-show-databases-and-show-schemas.md      |    2 +-
 .../sql-commands/100-show-files.md              |    2 +-
 .../sql-commands/110-show-tables-command.md     |  136 --
 .../sql-commands/110-show-tables.md             |  136 ++
 .../sql-commands/120-use-command.md             |  170 --
 _docs/sql-reference/sql-commands/120-use.md     |  170 ++
 .../sql-functions/010-math-and-trig.md          |   14 +-
 .../sql-functions/020-data-type-conversion.md   |  211 +-
 .../030-date-time-functions-and-arithmetic.md   |  186 +-
 .../sql-functions/040-string-manipulation.md    |  143 +-
 .../050-aggregate-and-aggregate-statistical.md  |    8 +-
 .../sql-functions/073-log-and-debug.md          |    3 +
 _docs/tutorials/010-tutorials-introduction.md   |    6 +-
 _docs/tutorials/020-drill-in-10-minutes.md      |   21 +-
 .../030-analyzing-the-yelp-academic-dataset.md  |   13 +-
 .../050-analyzing-highly-dynamic-datasets.md    |    8 +-
 .../020-getting-to-know-the-drill-sandbox.md    |    2 -
 78 files changed, 3926 insertions(+), 2240 deletions(-)
----------------------------------------------------------------------



[19/31] drill git commit: correct schema spec

Posted by ts...@apache.org.
correct schema spec


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/3468b99b
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/3468b99b
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/3468b99b

Branch: refs/heads/gh-pages
Commit: 3468b99b7958be7697137f8ae8736cab34d6505e
Parents: 7c95f5c
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 07:43:16 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 07:43:16 2015 -0700

----------------------------------------------------------------------
 .../050-starting-drill-on-windows.md                               | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/3468b99b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
index 8538c84..2cabb68 100644
--- a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
@@ -14,7 +14,7 @@ At this point, you can [submit queries]({{ site.baseurl }}/docs/drill-in-10-minu
 
 You can use the schema option in the **sqlline** command to specify a storage plugin. Specifying the storage plugin when you start up eliminates the need to specify the storage plugin in the query: For example, this command specifies the `dfs` storage plugin.
 
-    bin/sqlline –u jdbc:drill:schema=dfs;zk=local
+    c:\bin/sqlline –u "jdbc:drill:schema=dfs;zk=local"
 
 ## Exiting the Drill Shell
 


[26/31] drill git commit: fix links

Posted by ts...@apache.org.
fix links


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/97016752
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/97016752
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/97016752

Branch: refs/heads/gh-pages
Commit: 97016752fabe792acb57375538f356983d007e2c
Parents: 9700ff6
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 13:51:54 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 13:51:54 2015 -0700

----------------------------------------------------------------------
 _docs/sql-reference/sql-commands/005-supported-sql-commands.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/97016752/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/005-supported-sql-commands.md b/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
index 37a08eb..df604cb 100644
--- a/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
+++ b/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
@@ -6,4 +6,4 @@ The following table provides a list of the SQL commands that Drill supports,
 with their descriptions and example syntax:
 
 <table style='table-layout:fixed;width:100%'>
-    <tr><th >Command</th><th >Description</th><th >Syntax</th></tr><tr><td valign="top" width="15%"><a href="/docs/alter-session">ALTER SESSION</a></td><td valign="top" width="60%">Changes a system setting for the duration of a session. A session ends when you quit the Drill shell. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top"><pre>ALTER SESSION SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/alter-system">ALTER SYSTEM</a></td><td valign="top" >Permanently changes a system setting. The new settings persist across all sessions. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top" ><pre>ALTER SYSTEM SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><p><a href="/docs/create-table-as--cta
 s">CREATE TABLE AS<br />(CTAS)</a></p></td><td valign="top" >Creates a new table and populates the new table with rows returned from a SELECT query. Use the CREATE TABLE AS (CTAS) statement in place of INSERT INTO. When you issue the CTAS command, you create a directory that contains parquet or CSV files. Each workspace in a file system has a default file type.<br />You can specify which writer you want Drill to use when creating a table: parquet, CSV, or JSON (as specified with the <code>store.format</code> option).</td><td valign="top" ><pre class="programlisting">CREATE TABLE new_table_name AS &lt;query&gt;;</pre></td></tr><tr><td - valign="top" ><a href="/docs/create-view">CREATE VIEW </a></td><td - valign="top" >Creates a virtual structure for the result set of a stored query.-</td><td -valign="top" ><pre>CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS &lt;query&gt;;</pre></td></tr><tr><td  valign="top" ><a href="/docs/describe">DESCRIBE</a></td><td 
  valign="top" >Returns information about columns in a table or view.</td><td valign="top" ><pre>DESCRIBE [workspace.]table_name|view_name</pre></td></tr><tr><td valign="top" ><a href="/docs/drop-view-command">DROP VIEW</a></td><td valign="top" >Removes a view.</td><td valign="top" ><pre>DROP VIEW [workspace.]view_name ;</pre></td></tr><tr><td  valign="top" ><a href="/docs/explain">EXPLAIN PLAN FOR</a></td><td valign="top" >Returns the physical plan for a particular query.</td><td valign="top" ><pre>EXPLAIN PLAN FOR &lt;query&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/explain">EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR</a></td><td valign="top" >Returns the logical plan for a particular query.</td><td  valign="top" ><pre>EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR &lt;query&gt;;</pre></td></tr><tr><td colspan="1" valign="top" ><a href="/docs/select" rel="nofollow">SELECT</a></td><td valign="top" >Retrieves data from tables and files.</td><td  valign="top" ><pre>[WITH subquery]<
 br />SELECT column_list FROM table_name <br />[WHERE clause]<br />[GROUP BY clause]<br />[HAVING clause]<br />[ORDER BY clause];</pre></td></tr><tr><td  valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW DATABASES </a></td><td valign="top" >Returns a list of available schemas. Equivalent to SHOW SCHEMAS.</td><td valign="top" ><pre>SHOW DATABASES;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-files" >SHOW FILES</a></td><td valign="top" >Returns a list of files in a file system schema.</td><td valign="top" ><pre>SHOW FILES IN filesystem.`schema_name`;<br />SHOW FILES FROM filesystem.`schema_name`;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW SCHEMAS</a></td><td - valign="top" >Returns a list of available schemas. Equivalent to SHOW DATABASES.</td><td valign="top" ><pre>SHOW SCHEMAS;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-tables">SHOW TABLES</a></td><td valign="top" >Returns a list of tables and v
 iews.</td><td valign="top" ><pre>SHOW TABLES;</pre></td></tr><tr><td valign="top" ><a href="/docs/use">USE</a></td><td valign="top" >Change to a particular schema. When you opt to use a particular schema, Drill issues queries on that schema only.</td><td valign="top" ><pre>USE schema_name;</pre></td></tr></table>
+    <tr><th >Command</th><th >Description</th><th >Syntax</th></tr><tr><td valign="top" width="15%"><a href="/docs/alter-session">ALTER SESSION</a></td><td valign="top" width="60%">Changes a system setting for the duration of a session. A session ends when you quit the Drill shell. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top"><pre>ALTER SESSION SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/alter-system">ALTER SYSTEM</a></td><td valign="top" >Permanently changes a system setting. The new settings persist across all sessions. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top" ><pre>ALTER SYSTEM SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><p><a href="/docs/create-table-as--cta
 s">CREATE TABLE AS<br />(CTAS)</a></p></td><td valign="top" >Creates a new table and populates the new table with rows returned from a SELECT query. Use the CREATE TABLE AS (CTAS) statement in place of INSERT INTO. When you issue the CTAS command, you create a directory that contains parquet or CSV files. Each workspace in a file system has a default file type.<br />You can specify which writer you want Drill to use when creating a table: parquet, CSV, or JSON (as specified with the <code>store.format</code> option).</td><td valign="top" ><pre class="programlisting">CREATE TABLE new_table_name AS &lt;query&gt;;</pre></td></tr><tr><td - valign="top" ><a href="/docs/create-view">CREATE VIEW </a></td><td - valign="top" >Creates a virtual structure for the result set of a stored query.-</td><td -valign="top" ><pre>CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS &lt;query&gt;;</pre></td></tr><tr><td  valign="top" ><a href="/docs/describe">DESCRIBE</a></td><td 
  valign="top" >Returns information about columns in a table or view.</td><td valign="top" ><pre>DESCRIBE [workspace.]table_name|view_name</pre></td></tr><tr><td valign="top" ><a href="/docs/drop-view">DROP VIEW</a></td><td valign="top" >Removes a view.</td><td valign="top" ><pre>DROP VIEW [workspace.]view_name ;</pre></td></tr><tr><td  valign="top" ><a href="/docs/explain">EXPLAIN PLAN FOR</a></td><td valign="top" >Returns the physical plan for a particular query.</td><td valign="top" ><pre>EXPLAIN PLAN FOR &lt;query&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/explain">EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR</a></td><td valign="top" >Returns the logical plan for a particular query.</td><td  valign="top" ><pre>EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR &lt;query&gt;;</pre></td></tr><tr><td colspan="1" valign="top" ><a href="/docs/select" rel="nofollow">SELECT</a></td><td valign="top" >Retrieves data from tables and files.</td><td  valign="top" ><pre>[WITH subquery]<br />SEL
 ECT column_list FROM table_name <br />[WHERE clause]<br />[GROUP BY clause]<br />[HAVING clause]<br />[ORDER BY clause];</pre></td></tr><tr><td  valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW DATABASES </a></td><td valign="top" >Returns a list of available schemas. Equivalent to SHOW SCHEMAS.</td><td valign="top" ><pre>SHOW DATABASES;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-files" >SHOW FILES</a></td><td valign="top" >Returns a list of files in a file system schema.</td><td valign="top" ><pre>SHOW FILES IN filesystem.`schema_name`;<br />SHOW FILES FROM filesystem.`schema_name`;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW SCHEMAS</a></td><td - valign="top" >Returns a list of available schemas. Equivalent to SHOW DATABASES.</td><td valign="top" ><pre>SHOW SCHEMAS;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-tables">SHOW TABLES</a></td><td valign="top" >Returns a list of tables and views.</t
 d><td valign="top" ><pre>SHOW TABLES;</pre></td></tr><tr><td valign="top" ><a href="/docs/use">USE</a></td><td valign="top" >Change to a particular schema. When you opt to use a particular schema, Drill issues queries on that schema only.</td><td valign="top" ><pre>USE schema_name;</pre></td></tr></table>


[12/31] drill git commit: Text updates

Posted by ts...@apache.org.
Text updates


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/3ae74f0e
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/3ae74f0e
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/3ae74f0e

Branch: refs/heads/gh-pages
Commit: 3ae74f0ea82617068115a0833309d303043198f2
Parents: 1b7072c
Author: Tomer Shiran <ts...@gmail.com>
Authored: Sun May 17 21:04:18 2015 -0700
Committer: Tomer Shiran <ts...@gmail.com>
Committed: Sun May 17 21:04:18 2015 -0700

----------------------------------------------------------------------
 blog/_drafts/drill-1.0-released.md |  8 ++++----
 index.html                         | 33 +++++++++++++++------------------
 2 files changed, 19 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/3ae74f0e/blog/_drafts/drill-1.0-released.md
----------------------------------------------------------------------
diff --git a/blog/_drafts/drill-1.0-released.md b/blog/_drafts/drill-1.0-released.md
index e0c721a..e0631f7 100644
--- a/blog/_drafts/drill-1.0-released.md
+++ b/blog/_drafts/drill-1.0-released.md
@@ -7,14 +7,14 @@ authors: ["tshiran", "jnadeau"]
 ---
 We embarked on the Drill project in late 2012 with two primary objectives:
 
-* Revolutionize the query engine by enabling low-latency queries on Big Data while getting rid of all the 'overhead' - namely, the need to load data, create and maintain schemas, transform data, etc. We wanted to develop a system that would support the speed and agility at which modern organizations want (or need) to operate in this era.
+* Enable agility by getting rid of all the traditional overhead - namely, the need to load data, create and maintain schemas, transform data, etc. We wanted to develop a system that would support the speed and agility at which modern organizations want (or need) to operate in this era.
 * Unlock the data housed in non-relational datastores like NoSQL, Hadoop and cloud storage, making it available not only to developers, but also business users, analysts, data scientists and anyone else who can write a SQL query or use a BI tool. Non-relational datastores are capturing an increasing share of the world's data, and it's incredibly hard to explore and analyze this data.
 
-Today we're happy to announce the availability of Drill 1.0, our first production-ready release. Drill 1.0 includes many performance and reliability enhancements over previous releases.
+Today we're happy to announce the availability of the production-ready Drill 1.0 release. This release addresses [228 JIRAs](https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12325568) on top of the 0.9 release earlier this month.
 
 We would not have been able to reach this milestone without the tremendous effort by all the [committers]({{ site.baseurl }}/team/) and contributors, and we would like to congratulate the entire community on achieving this milestone. While 1.0 is an exciting milestone, it's really just the beginning of the journey. We'll release 1.1 next month, and continue with our 4-6 week release cycle, so you can count on many additional enhancements over the coming months.
 
-We have inlcluded the press release issued by the Apache Software Foundation below.
+We have included the press release issued by the Apache Software Foundation below.
 
 Happy Drilling!  
 Tomer Shiran and Jacques Nadeau
@@ -23,7 +23,7 @@ Tomer Shiran and Jacques Nadeau
 
 # The Apache Software Foundation Announces Apache™ Drill™ 1.0
 
-## Open Source schema-free SQL query engine revolutionizes data exploration and analytics for Apache Hadoop®, NoSQL and Cloud storage 
+## Open Source schema-free SQL query engine revolutionizes data exploration and analytics for Apache Hadoop®, NoSQL and Cloud storage
 
 Forest Hill, MD - 19 May 2015 - The Apache Software Foundation (ASF), the all-volunteer developers, stewards, and incubators of more than 350 Open Source projects and initiatives, announced today the availability of Apache™ Drill™ 1.0, the schema-free SQL query engine for Apache Hadoop®, NoSQL and Cloud storage.
 

http://git-wip-us.apache.org/repos/asf/drill/blob/3ae74f0e/index.html
----------------------------------------------------------------------
diff --git a/index.html b/index.html
index e64a70b..6b27d40 100755
--- a/index.html
+++ b/index.html
@@ -87,8 +87,8 @@ $(document).ready(function() {
   <div class="big"><img src="{{ site.baseurl }}/images/home-any.png" style="width:300px" /></div>
   <div class="description">
     <h1>Query any non-relational datastore (well, almost...)</h1>
-    <p>Drill supports a variety of NoSQL databases and file systems, including HBase, MongoDB, MapR-DB, HDFS, MapR-FS, S3, Azure Blob Storage, Google Cloud Storage, Swift, NAS and local files. A single query can join data from multiple datastores. For example, you could join a user profile collection in MongoDB with a directory of event logs in Hadoop.</p>
-    <p>Drill’s datastore-aware optimizer automatically restructures a query plan to leverage the datastore’s internal processing capabilities. In addition, Drill supports 'data locality', so it’s a good idea to co-locate Drill and the datastore on the same nodes.</p>
+    <p>Drill supports a variety of NoSQL databases and file systems, including HBase, MongoDB, MapR-DB, HDFS, MapR-FS, Amazon S3, Azure Blob Storage, Google Cloud Storage, Swift, NAS and local files. A single query can join data from multiple datastores. For example, you can join a user profile collection in MongoDB with a directory of event logs in Hadoop.</p>
+    <p>Drill's datastore-aware optimizer automatically restructures a query plan to leverage the datastore's internal processing capabilities. In addition, Drill supports data locality, so it's a good idea to co-locate Drill and the datastore on the same nodes.</p>
   </div>
   <div class="small"><img src="{{ site.baseurl }}/images/home-any.png" style="width:300px" /></div>
 </div>
@@ -100,7 +100,7 @@ $(document).ready(function() {
     <p>Drill leverages advanced query compilation and re-compilation techniques to maximize performance without requiring up-front schema knowledge.</p>
   </div>
   <div class="small big"><pre>SELECT * FROM <span class="code-underline">dfs.root.`/web/logs`</span>;
-  
+
 SELECT country, count(*)
   FROM <span class="code-underline">mongodb.web.users</span>
   GROUP BY country;
@@ -113,8 +113,8 @@ SELECT timestamp
 <div class="home-row">
   <div class="big"><img src="{{ site.baseurl }}/images/home-json.png" style="width:300px" /></div>
   <div class="description">
-    <h1>Treat your data like a table even when it’s not</h1>
-    <p>Drill features a JSON data model that enables it to query complex/nested data and rapidly evolving structure commonly seen in modern applications and non-relational datastores. Drill also provides intuitive extensions to SQL so that the user can easily query complex data.
+    <h1>Treat your data like a table even when it's not</h1>
+    <p>Drill features a JSON data model that enables queries on complex/nested data as well as rapidly evolving structures commonly seen in modern applications and non-relational datastores. Drill also provides intuitive extensions to SQL so that you can easily query complex data.
     <p>Drill is the only columnar query engine that supports complex data. It features an in-memory shredded columnar representation for complex data which allows Drill to achieve columnar speed with the flexibility of an internal JSON document model.</p>
   </div>
   <div class="small"><img src="{{ site.baseurl }}/images/home-json.png" style="width:300px" /></div>
@@ -124,33 +124,31 @@ SELECT timestamp
   <div class="description">
     <h1>Keep using the BI tools you love</h1>
     <p>Drill supports standard SQL. Business users, analysts and data scientists can use standard BI/analytics tools such as Tableau, Qlik, MicroStrategy, Spotfire, SAS and Excel to interact with non-relational datastores by leveraging Drill's JDBC and ODBC drivers. Developers can leverage Drill's simple REST API in their custom applications to create beautiful visualizations.</p>
-    <p>Drill’s virtual datasets allow even the most complex, non-relational data to be mapped into BI-friendly structures which users can explore and visualize using their tool of choice.</p>
+    <p>Drill's virtual datasets allow even the most complex, non-relational data to be mapped into BI-friendly structures which users can explore and visualize using their tool of choice.</p>
   </div>
   <div class="small big"><img src="{{ site.baseurl }}/images/home-bi.png" style="width:300px" /></div>
 </div>
 
 <div class="home-row">
-  <div class="big"><pre>$ curl j.mp/drill-1-0-0-rc1 -o drill.tgz
+  <div class="big"><pre>$ curl &lt;url&gt; -o drill.tgz
 $ tar xzf drill.tgz
-$ cd apache-drill-1.0.0
-$ bin/drill-embedded
-</pre></div>
+$ cd apache-drill-&lt;version&gt;
+$ bin/drill-embedded</pre></div>
   <div class="description">
     <h1>Scale from one laptop to 1000s of servers</h1>
-    <p>We made it easy to download and run Drill on your laptop. It runs on Mac, Windows and Linux, and within a minute or two you’ll be exploring your data. When you’re ready for prime time, deploy Drill on a cluster of commodity servers and take advantage of the world’s most scalable and high performance execution engine.
-    <p>Drill’s symmetrical architecture (all nodes are the same) and simple installation makes it easy to deploy and operate very large clusters.</p>
+    <p>We made it easy to download and run Drill on your laptop. It runs on Mac, Windows and Linux, and within a minute or two you'll be exploring your data. When you're ready for prime time, deploy Drill on a cluster of commodity servers and take advantage of the world's most scalable and high performance execution engine.
+    <p>Drill's symmetrical architecture (all nodes are the same) and simple installation make it easy to deploy and operate very large clusters.</p>
   </div>
-  <div class="small"><pre>$ curl j.mp/drill-1-0-0-rc1 -o drill.tgz
+  <div class="small"><pre>$ curl &lt;url&gt; -o drill.tgz
   $ tar xzf drill.tgz
-  $ cd apache-drill-1.0.0
-  $ bin/drill-embedded
-  </pre></div>
+  $ cd apache-drill-&lt;version&gt;
+  $ bin/drill-embedded</pre></div>
 </div>
 
 <div class="home-row">
   <div class="description">
     <h1>No more waiting for coffee</h1>
-    <p>Drill isn’t the world’s first query engine, but it’s the first that combines both flexibility and speed. To achieve this, Drill features a radically different architecture that enables record-breaking performance without sacrificing the flexibility offered by the JSON document model. For example:<ul>
+    <p>Drill isn't the world's first query engine, but it's the first that combines both flexibility and speed. To achieve this, Drill features a radically different architecture that enables record-breaking performance without sacrificing the flexibility offered by the JSON document model. Drill's design includes:<ul>
 <li>Columnar execution engine (the first ever to support complex data!)</li>
 <li>Data-driven compilation and recompilation at execution time</li>
 <li>Specialized memory management that reduces memory footprint and eliminates garbage collections</li>
@@ -159,4 +157,3 @@ $ bin/drill-embedded
   </div>
   <div class="small big"><img src="{{ site.baseurl }}/images/home-coffee.jpg" style="width:300px" /></div>
 </div>
-


[29/31] drill git commit: add BB's log and debug, using jdbc > using jdbc with squirrel on win

Posted by ts...@apache.org.
add BB's log and debug, using jdbc > using jdbc with squirrel on win


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/134b6ff8
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/134b6ff8
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/134b6ff8

Branch: refs/heads/gh-pages
Commit: 134b6ff8e90ea689abad9819b071cd9235c07877
Parents: e331872
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 16:27:32 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 16:27:32 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 314 ++++++++++++++++---
 _docs/073-log-and-debug.md                      |   3 +
 .../001-log-and-debug-introduction.md           |  15 +
 _docs/log-and-debug/002-error-messages.md       |  25 ++
 _docs/log-and-debug/003-modify-logback.xml.md   |  54 ++++
 .../004-review-the-java-stack-trace.md          |  41 +++
 .../020-using-jdbc-with-squirrel-on-windows.md  | 164 ++++++++++
 _docs/odbc-jdbc-interfaces/020-using-jdbc.md    | 164 ----------
 .../sql-functions/073-log-and-debug.md          |   3 +
 9 files changed, 583 insertions(+), 200 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 7fa1345..6566216 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -2805,6 +2805,23 @@
             "title": "Enron Emails", 
             "url": "/docs/enron-emails/"
         }, 
+        "Error Messages": {
+            "breadcrumbs": [
+                {
+                    "title": "Log and Debug", 
+                    "url": "/docs/log-and-debug/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Modify logback.xml", 
+            "next_url": "/docs/modify-logback-xml/", 
+            "parent": "Log and Debug", 
+            "previous_title": "Log and Debug Introduction", 
+            "previous_url": "/docs/log-and-debug-introduction/", 
+            "relative_path": "_docs/log-and-debug/002-error-messages.md", 
+            "title": "Error Messages", 
+            "url": "/docs/error-messages/"
+        }, 
         "FLATTEN": {
             "breadcrumbs": [
                 {
@@ -3660,8 +3677,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Using JDBC", 
-            "next_url": "/docs/using-jdbc/", 
+            "next_title": "Using JDBC with SQuirreL on Windows", 
+            "next_url": "/docs/using-jdbc-with-squirrel-on-windows/", 
             "parent": "ODBC/JDBC Interfaces", 
             "previous_title": "ODBC/JDBC Interfaces", 
             "previous_url": "/docs/odbc-jdbc-interfaces/", 
@@ -3952,6 +3969,104 @@
             "title": "Lexical Structure", 
             "url": "/docs/lexical-structure/"
         }, 
+        "Log and Debug": {
+            "breadcrumbs": [], 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Error Messages", 
+                    "next_url": "/docs/error-messages/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Log and Debug", 
+                    "previous_url": "/docs/log-and-debug/", 
+                    "relative_path": "_docs/log-and-debug/001-log-and-debug-introduction.md", 
+                    "title": "Log and Debug Introduction", 
+                    "url": "/docs/log-and-debug-introduction/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Modify logback.xml", 
+                    "next_url": "/docs/modify-logback-xml/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Log and Debug Introduction", 
+                    "previous_url": "/docs/log-and-debug-introduction/", 
+                    "relative_path": "_docs/log-and-debug/002-error-messages.md", 
+                    "title": "Error Messages", 
+                    "url": "/docs/error-messages/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Review the Java Stack Trace", 
+                    "next_url": "/docs/review-the-java-stack-trace/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Error Messages", 
+                    "previous_url": "/docs/error-messages/", 
+                    "relative_path": "_docs/log-and-debug/003-modify-logback.xml.md", 
+                    "title": "Modify logback.xml", 
+                    "url": "/docs/modify-logback-xml/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "", 
+                    "next_url": "", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Modify logback.xml", 
+                    "previous_url": "/docs/modify-logback-xml/", 
+                    "relative_path": "_docs/log-and-debug/004-review-the-java-stack-trace.md", 
+                    "title": "Review the Java Stack Trace", 
+                    "url": "/docs/review-the-java-stack-trace/"
+                }
+            ], 
+            "next_title": "Log and Debug Introduction", 
+            "next_url": "/docs/log-and-debug-introduction/", 
+            "parent": "", 
+            "previous_title": "Project Bylaws", 
+            "previous_url": "/docs/project-bylaws/", 
+            "relative_path": "_docs/sql-reference/sql-functions/073-log-and-debug.md", 
+            "title": "Log and Debug", 
+            "url": "/docs/log-and-debug/"
+        }, 
+        "Log and Debug Introduction": {
+            "breadcrumbs": [
+                {
+                    "title": "Log and Debug", 
+                    "url": "/docs/log-and-debug/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Error Messages", 
+            "next_url": "/docs/error-messages/", 
+            "parent": "Log and Debug", 
+            "previous_title": "Log and Debug", 
+            "previous_url": "/docs/log-and-debug/", 
+            "relative_path": "_docs/log-and-debug/001-log-and-debug-introduction.md", 
+            "title": "Log and Debug Introduction", 
+            "url": "/docs/log-and-debug-introduction/"
+        }, 
         "MapR-DB Format": {
             "breadcrumbs": [
                 {
@@ -3990,6 +4105,23 @@
             "title": "Math and Trig", 
             "url": "/docs/math-and-trig/"
         }, 
+        "Modify logback.xml": {
+            "breadcrumbs": [
+                {
+                    "title": "Log and Debug", 
+                    "url": "/docs/log-and-debug/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Review the Java Stack Trace", 
+            "next_url": "/docs/review-the-java-stack-trace/", 
+            "parent": "Log and Debug", 
+            "previous_title": "Error Messages", 
+            "previous_url": "/docs/error-messages/", 
+            "relative_path": "_docs/log-and-debug/003-modify-logback.xml.md", 
+            "title": "Modify logback.xml", 
+            "url": "/docs/modify-logback-xml/"
+        }, 
         "MongoDB Plugin for Apache Drill": {
             "breadcrumbs": [
                 {
@@ -4200,8 +4332,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Using JDBC", 
-                    "next_url": "/docs/using-jdbc/", 
+                    "next_title": "Using JDBC with SQuirreL on Windows", 
+                    "next_url": "/docs/using-jdbc-with-squirrel-on-windows/", 
                     "parent": "ODBC/JDBC Interfaces", 
                     "previous_title": "ODBC/JDBC Interfaces", 
                     "previous_url": "/docs/odbc-jdbc-interfaces/", 
@@ -4222,9 +4354,9 @@
                     "parent": "ODBC/JDBC Interfaces", 
                     "previous_title": "Interfaces Introduction", 
                     "previous_url": "/docs/interfaces-introduction/", 
-                    "relative_path": "_docs/odbc-jdbc-interfaces/020-using-jdbc.md", 
-                    "title": "Using JDBC", 
-                    "url": "/docs/using-jdbc/"
+                    "relative_path": "_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md", 
+                    "title": "Using JDBC with SQuirreL on Windows", 
+                    "url": "/docs/using-jdbc-with-squirrel-on-windows/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -4406,8 +4538,8 @@
                     "next_title": "ODBC on Linux and Mac Introduction", 
                     "next_url": "/docs/odbc-on-linux-and-mac-introduction/", 
                     "parent": "ODBC/JDBC Interfaces", 
-                    "previous_title": "Using JDBC", 
-                    "previous_url": "/docs/using-jdbc/", 
+                    "previous_title": "Using JDBC with SQuirreL on Windows", 
+                    "previous_url": "/docs/using-jdbc-with-squirrel-on-windows/", 
                     "relative_path": "_docs/odbc-jdbc-interfaces/030-using-odbc-on-linux-and-mac-os-x.md", 
                     "title": "Using ODBC on Linux and Mac OS X", 
                     "url": "/docs/using-odbc-on-linux-and-mac-os-x/"
@@ -4709,8 +4841,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Query Audit Logging", 
-                    "next_url": "/docs/query-audit-logging/", 
+                    "next_title": "Log and Debug", 
+                    "next_url": "/docs/log-and-debug/", 
                     "parent": "Performance Tuning", 
                     "previous_title": "Performance Tuning", 
                     "previous_url": "/docs/performance-tuning/", 
@@ -4736,8 +4868,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Query Audit Logging", 
-            "next_url": "/docs/query-audit-logging/", 
+            "next_title": "Log and Debug", 
+            "next_url": "/docs/log-and-debug/", 
             "parent": "Performance Tuning", 
             "previous_title": "Performance Tuning", 
             "previous_url": "/docs/performance-tuning/", 
@@ -4858,8 +4990,8 @@
         "Project Bylaws": {
             "breadcrumbs": [], 
             "children": [], 
-            "next_title": "", 
-            "next_url": "", 
+            "next_title": "Log and Debug", 
+            "next_url": "/docs/log-and-debug/", 
             "parent": "", 
             "previous_title": "2014 Q1 Drill Report", 
             "previous_url": "/docs/2014-q1-drill-report/", 
@@ -4891,8 +5023,8 @@
             "next_title": "Getting Query Information", 
             "next_url": "/docs/getting-query-information/", 
             "parent": "", 
-            "previous_title": "Performance Tuning Introduction", 
-            "previous_url": "/docs/performance-tuning-introduction/", 
+            "previous_title": "Log and Debug", 
+            "previous_url": "/docs/log-and-debug/", 
             "relative_path": "_docs/074-query-audit-logging.md", 
             "title": "Query Audit Logging", 
             "url": "/docs/query-audit-logging/"
@@ -6024,6 +6156,23 @@
             "title": "Reserved Keywords", 
             "url": "/docs/reserved-keywords/"
         }, 
+        "Review the Java Stack Trace": {
+            "breadcrumbs": [
+                {
+                    "title": "Log and Debug", 
+                    "url": "/docs/log-and-debug/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "", 
+            "next_url": "", 
+            "parent": "Log and Debug", 
+            "previous_title": "Modify logback.xml", 
+            "previous_url": "/docs/modify-logback-xml/", 
+            "relative_path": "_docs/log-and-debug/004-review-the-java-stack-trace.md", 
+            "title": "Review the Java Stack Trace", 
+            "url": "/docs/review-the-java-stack-trace/"
+        }, 
         "SELECT": {
             "breadcrumbs": [
                 {
@@ -8931,7 +9080,7 @@
             "title": "Using Drill Explorer on Windows", 
             "url": "/docs/using-drill-explorer-on-windows/"
         }, 
-        "Using JDBC": {
+        "Using JDBC with SQuirreL on Windows": {
             "breadcrumbs": [
                 {
                     "title": "ODBC/JDBC Interfaces", 
@@ -8944,9 +9093,9 @@
             "parent": "ODBC/JDBC Interfaces", 
             "previous_title": "Interfaces Introduction", 
             "previous_url": "/docs/interfaces-introduction/", 
-            "relative_path": "_docs/odbc-jdbc-interfaces/020-using-jdbc.md", 
-            "title": "Using JDBC", 
-            "url": "/docs/using-jdbc/"
+            "relative_path": "_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md", 
+            "title": "Using JDBC with SQuirreL on Windows", 
+            "url": "/docs/using-jdbc-with-squirrel-on-windows/"
         }, 
         "Using MicroStrategy Analytics with Apache Drill": {
             "breadcrumbs": [
@@ -9145,8 +9294,8 @@
             "next_title": "ODBC on Linux and Mac Introduction", 
             "next_url": "/docs/odbc-on-linux-and-mac-introduction/", 
             "parent": "ODBC/JDBC Interfaces", 
-            "previous_title": "Using JDBC", 
-            "previous_url": "/docs/using-jdbc/", 
+            "previous_title": "Using JDBC with SQuirreL on Windows", 
+            "previous_url": "/docs/using-jdbc-with-squirrel-on-windows/", 
             "relative_path": "_docs/odbc-jdbc-interfaces/030-using-odbc-on-linux-and-mac-os-x.md", 
             "title": "Using ODBC on Linux and Mac OS X", 
             "url": "/docs/using-odbc-on-linux-and-mac-os-x/"
@@ -10570,8 +10719,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Using JDBC", 
-                    "next_url": "/docs/using-jdbc/", 
+                    "next_title": "Using JDBC with SQuirreL on Windows", 
+                    "next_url": "/docs/using-jdbc-with-squirrel-on-windows/", 
                     "parent": "ODBC/JDBC Interfaces", 
                     "previous_title": "ODBC/JDBC Interfaces", 
                     "previous_url": "/docs/odbc-jdbc-interfaces/", 
@@ -10592,9 +10741,9 @@
                     "parent": "ODBC/JDBC Interfaces", 
                     "previous_title": "Interfaces Introduction", 
                     "previous_url": "/docs/interfaces-introduction/", 
-                    "relative_path": "_docs/odbc-jdbc-interfaces/020-using-jdbc.md", 
-                    "title": "Using JDBC", 
-                    "url": "/docs/using-jdbc/"
+                    "relative_path": "_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md", 
+                    "title": "Using JDBC with SQuirreL on Windows", 
+                    "url": "/docs/using-jdbc-with-squirrel-on-windows/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -10776,8 +10925,8 @@
                     "next_title": "ODBC on Linux and Mac Introduction", 
                     "next_url": "/docs/odbc-on-linux-and-mac-introduction/", 
                     "parent": "ODBC/JDBC Interfaces", 
-                    "previous_title": "Using JDBC", 
-                    "previous_url": "/docs/using-jdbc/", 
+                    "previous_title": "Using JDBC with SQuirreL on Windows", 
+                    "previous_url": "/docs/using-jdbc-with-squirrel-on-windows/", 
                     "relative_path": "_docs/odbc-jdbc-interfaces/030-using-odbc-on-linux-and-mac-os-x.md", 
                     "title": "Using ODBC on Linux and Mac OS X", 
                     "url": "/docs/using-odbc-on-linux-and-mac-os-x/"
@@ -11393,8 +11542,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Query Audit Logging", 
-                    "next_url": "/docs/query-audit-logging/", 
+                    "next_title": "Log and Debug", 
+                    "next_url": "/docs/log-and-debug/", 
                     "parent": "Performance Tuning", 
                     "previous_title": "Performance Tuning", 
                     "previous_url": "/docs/performance-tuning/", 
@@ -11414,6 +11563,18 @@
         }, 
         {
             "breadcrumbs": [], 
+            "children": [], 
+            "next_title": "Query Audit Logging", 
+            "next_url": "/docs/query-audit-logging/", 
+            "parent": "", 
+            "previous_title": "Performance Tuning Introduction", 
+            "previous_url": "/docs/performance-tuning-introduction/", 
+            "relative_path": "_docs/073-log-and-debug.md", 
+            "title": "Log and Debug", 
+            "url": "/docs/log-and-debug/"
+        }, 
+        {
+            "breadcrumbs": [], 
             "children": [
                 {
                     "breadcrumbs": [
@@ -11436,8 +11597,8 @@
             "next_title": "Getting Query Information", 
             "next_url": "/docs/getting-query-information/", 
             "parent": "", 
-            "previous_title": "Performance Tuning Introduction", 
-            "previous_url": "/docs/performance-tuning-introduction/", 
+            "previous_title": "Log and Debug", 
+            "previous_url": "/docs/log-and-debug/", 
             "relative_path": "_docs/074-query-audit-logging.md", 
             "title": "Query Audit Logging", 
             "url": "/docs/query-audit-logging/"
@@ -13224,14 +13385,95 @@
         {
             "breadcrumbs": [], 
             "children": [], 
-            "next_title": "", 
-            "next_url": "", 
+            "next_title": "Log and Debug", 
+            "next_url": "/docs/log-and-debug/", 
             "parent": "", 
             "previous_title": "2014 Q1 Drill Report", 
             "previous_url": "/docs/2014-q1-drill-report/", 
             "relative_path": "_docs/170-bylaws.md", 
             "title": "Project Bylaws", 
             "url": "/docs/project-bylaws/"
+        }, 
+        {
+            "breadcrumbs": [], 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Error Messages", 
+                    "next_url": "/docs/error-messages/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Log and Debug", 
+                    "previous_url": "/docs/log-and-debug/", 
+                    "relative_path": "_docs/log-and-debug/001-log-and-debug-introduction.md", 
+                    "title": "Log and Debug Introduction", 
+                    "url": "/docs/log-and-debug-introduction/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Modify logback.xml", 
+                    "next_url": "/docs/modify-logback-xml/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Log and Debug Introduction", 
+                    "previous_url": "/docs/log-and-debug-introduction/", 
+                    "relative_path": "_docs/log-and-debug/002-error-messages.md", 
+                    "title": "Error Messages", 
+                    "url": "/docs/error-messages/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Review the Java Stack Trace", 
+                    "next_url": "/docs/review-the-java-stack-trace/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Error Messages", 
+                    "previous_url": "/docs/error-messages/", 
+                    "relative_path": "_docs/log-and-debug/003-modify-logback.xml.md", 
+                    "title": "Modify logback.xml", 
+                    "url": "/docs/modify-logback-xml/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "", 
+                    "next_url": "", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Modify logback.xml", 
+                    "previous_url": "/docs/modify-logback-xml/", 
+                    "relative_path": "_docs/log-and-debug/004-review-the-java-stack-trace.md", 
+                    "title": "Review the Java Stack Trace", 
+                    "url": "/docs/review-the-java-stack-trace/"
+                }
+            ], 
+            "next_title": "Log and Debug Introduction", 
+            "next_url": "/docs/log-and-debug-introduction/", 
+            "parent": "", 
+            "previous_title": "Project Bylaws", 
+            "previous_url": "/docs/project-bylaws/", 
+            "relative_path": "_docs/sql-reference/sql-functions/073-log-and-debug.md", 
+            "title": "Log and Debug", 
+            "url": "/docs/log-and-debug/"
         }
     ]
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/073-log-and-debug.md
----------------------------------------------------------------------
diff --git a/_docs/073-log-and-debug.md b/_docs/073-log-and-debug.md
new file mode 100644
index 0000000..142d6b6
--- /dev/null
+++ b/_docs/073-log-and-debug.md
@@ -0,0 +1,3 @@
+---
+title: "Log and Debug"
+---
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/log-and-debug/001-log-and-debug-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/log-and-debug/001-log-and-debug-introduction.md b/_docs/log-and-debug/001-log-and-debug-introduction.md
new file mode 100644
index 0000000..473883f
--- /dev/null
+++ b/_docs/log-and-debug/001-log-and-debug-introduction.md
@@ -0,0 +1,15 @@
+---
+title: "Log and Debug Introduction"
+parent: "Log and Debug"
+---
+
+You can use the Drill logs in conjunction with query profiles to troubleshoot issues that you encounter. Drill uses Logback as its default logging system. Logback behavior is defined by configurations set in <drill_installation_directory>/conf/logback.xml. 
+
+You can configure Logback to enable specific loggers for particular components. You can also enable Logback to output log messages to Lilith, a desktop application that you can use for socket logging. By default, Drill outputs log files to /var/log/drill.
+
+Drill provides two standard output files:  
+
+* drillbit.out
+* drill.log
+
+Drill also provides a special file, drillbit_queries.json, on each Drill node. This log provides the QueryID and profile for every query run on a Drillbit. The Profile view in the Drill Web UI lists the last one-hundred queries that Drill ran. To see information for queries beyond the last one-hundred, you can view the drillbit_queries.json file on each Drill node.

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/log-and-debug/002-error-messages.md
----------------------------------------------------------------------
diff --git a/_docs/log-and-debug/002-error-messages.md b/_docs/log-and-debug/002-error-messages.md
new file mode 100644
index 0000000..eb0c827
--- /dev/null
+++ b/_docs/log-and-debug/002-error-messages.md
@@ -0,0 +1,25 @@
+---
+title: "Error Messages"
+parent: "Log and Debug"
+---
+
+Drill produces several types of error messages. You can ignore issues that contain any of the following syntax:  
+
+   * InterruptedException
+   * ChannelClosedException
+   * Connection reset by peer
+
+These issues typically result from a problem outside of the query process. However, if you encounter a java.lang.OutOfMemoryError error, take action and give Drill as much memory as possible to resolve the issue. See Configuring Drill Memory.
+
+Drill assigns an ErrorId to each error that occurs. An ErrorId is a unique identifier for a particular error that tells you which node assigned the error. For example,
+[ 1ee8e004-2fce-420f-9790-5c6f8b7cad46 on 10.1.1.109:31010 ]. You can log into the node that assigned the error and grep the Drill log for the ErrorId to get more information about the error.
+
+Thread names in the Drill logs also provide important information, including the query and fragment IDs. If a query fails, you can view the Java stack trace for the executing nodes to determine which part of the query is failing. An example thread, [42d7545c-c89b-47ab-9e38-2787bd200d4e:frag:1:173] includes QueryID 42d7545c-c89b-47ab-9e38-2787bd200d4e, MajorFragmentID 1, and MinorFragmentID 173.
+
+The following table provides descriptions for the IDs included in a thread:
+
+| ID Type         | Description                                                                                                                                                                                                                                  |
+|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| QueryID         | The identifier assigned to the query. You can locate a query in Drill Web UI by the QueryID and then cancel the query if needed. See Query Profiles for more information.                                                                    |
+| MajorFragmentID | The identifier assigned to a major fragment. Major fragments map to the physical plan. You can see major fragment activity for a query in the Drill Web UI. See [Query Profiles]({{site.baseurl}}/docs/query-profiles) for more information. |
+| MinorFragmentID | The identifier assigned to the minor fragment. Minor fragments map to the parallelization of major fragments. See Query Profiles for more information.                                                                                       |
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/log-and-debug/003-modify-logback.xml.md
----------------------------------------------------------------------
diff --git a/_docs/log-and-debug/003-modify-logback.xml.md b/_docs/log-and-debug/003-modify-logback.xml.md
new file mode 100644
index 0000000..ca506ad
--- /dev/null
+++ b/_docs/log-and-debug/003-modify-logback.xml.md
@@ -0,0 +1,54 @@
+---
+title: "Modify logback.xml"
+parent: "Log and Debug"
+---
+
+You can access logback.xml in <drill_installation_directory>/conf/. The default log level is set to INFO. You can enable debug logging and Lilith in logback.xml. Drill automatically picks up changes to logback.xml if you modify the file while Drill is running. You do not have to restart the cluster for Logback to pick up new settings.
+
+Logback.xml contains two appenders, STDOUT and FILE, that determine where Drill outputs logs. STDOUT output is directed to the console. FILE output is directed to drillbit.log and drillbit.out located in /var/log/drill/conf. You can modify these appenders to redirect log output to different locations.
+
+## Enable Debug Logging
+You can enable DEBUG on some loggers for additional information. Each of the classes listed below have a logger that you can add to logback.xml:
+
+* Task execution (similar to MR task submit output)  
+  * org.apache.drill.exec.rpc.control.WorkEventBus  
+  * org.apache.drill.exec.work.fragment.FragmentExecutor  
+  * org.apache.drill.exec.work.foreman.QueryManager  
+* Cluster cohesion and coordination  
+  * org.apache.drill.exec.coord.zk.ZkClusterCoordinator  
+* Query text and plans
+  * org.apache.drill.exec.planner.sql.handlers.DefaultSqlHandler
+
+To enable DEBUG on a logger, add the logger to logback.xml, and set the level value to “debug.”
+The following example shows a task execution logger that you can add to logback.xml:
+
+              <logger name="org.apache.drill.exec.work.foreman.QueryManager" additivity="false">
+                 <level value="debug" />
+                <appender-ref ref="FILE" />
+              </logger>
+
+## Enable Lilith
+You can use Lilith and the socket appender for local debugging. Lilith connects to a Drillbit and shows logging as it happens.
+
+To enable log messages to output to Lilith, uncomment the following two sections in logback.xml and change LILITH_HOSTNAME to that of the machine running the Lilith application:
+
+       <appender name="SOCKET" class="de.huxhorn.lilith.logback.appender.ClassicMultiplexSocketAppender"> <Compressing>true</Compressing> <ReconnectionDelay>10000</ReconnectionDelay> <IncludeCallerData>true</IncludeCallerData> <RemoteHosts>${LILITH_HOSTNAME:-localhost}</RemoteHosts> </appender>
+       
+       <logger name="org.apache.drill" additivity="false">
+           <level value="debug" />
+           <appender-ref ref="SOCKET" />
+       </logger>
+ 
+## Add the Hostname
+Logback includes a layout called PatternLayout that takes a logging event and returns a string. You can modify PatternLayout's conversion pattern to include the hostname in the log message. Add the hostname to the conversion pattern in logback.xml to identify which machine is sending log messages.
+
+The following example shows the conversion pattern with the hostname included:
+
+       <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>$%d{HH:mm:ss.SSS} %property{HOSTNAME} [%thread] %-5level %logger{36} - %msg%n
+            </pattern>
+        </encoder>
+         </appender>
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/log-and-debug/004-review-the-java-stack-trace.md
----------------------------------------------------------------------
diff --git a/_docs/log-and-debug/004-review-the-java-stack-trace.md b/_docs/log-and-debug/004-review-the-java-stack-trace.md
new file mode 100644
index 0000000..3b74397
--- /dev/null
+++ b/_docs/log-and-debug/004-review-the-java-stack-trace.md
@@ -0,0 +1,41 @@
+---
+title: "Review the Java Stack Trace"
+parent: "Log and Debug"
+---
+
+If a query is failing, you can use jstack to print the Java thread stack traces for the Drillbit process on the executing nodes to determine which part of the query is causing the failure. Drill labels threads based on query coordinates. For example, in the following thread you can see the QueryID, MajorFragmentID, and MinorFragmentID, respectively:
+
+       "2ae226e1-f4c5-6253-e9ff-22ac1204935f:frag:3:2" daemon prio=10 tid=0x00007fa81167b800 nid=0x1431 waiting on condition [0x00007fa7aa3b3000]
+
+When you use jstack, grep for frag to see the fragments executing the query. To see the stack trace for a Drillbit process, you must have the Drillbit process ID. You can run jps to get the ID. 
+
+## Use jstack to Review Stack Traces
+The following example shows you how to run jps to get the Drillbit process ID and grep for frag in the stack trace:
+
+       [root@drillats2 ~]# jps
+       17455 jps
+       1975 Drillbit
+       31801 HRegionServer
+       28858 TaskTracker
+       22603 WardenMain
+
+In this example, the Drillbit process ID is 1975. Alternatively, you can grep for the Java processes running to identify the Drillbit process ID:
+
+       [root@drillats2 ~]# ps -el|grep java
+
+Once you have the Drillbit process ID, you can use jstack to view the stack trace of threads.
+       
+       [root@drillats2 ~]# jstack 1975 | grep frag:
+       "2ae22585-bb7b-b482-0542-4ebd5225a249:frag:0:0" daemon prio=10 tid=0x00007fa782796800 nid=0x37a2 runnable [0x00007fa7866e4000]
+       "2ae225ab-c37a-31dc-227e-5f165e057f5f:frag:0:0" daemon prio=10 tid=0x00007fa7a004f000 nid=0x2946 runnable [0x00007fa7868e6000]
+       "2ae22598-2a77-5361-363b-291301fc44e8:frag:0:0" daemon prio=10 tid=0x00007fa782280800 nid=0x1bd4 runnable [0x00007fa7a4293000]
+       "2ae22582-c517-ec7a-8438-c81b7f5fdbce:frag:0:0" daemon prio=10 tid=0x0000000004651800 nid=0x144f runnable [0x00007fa7a53a4000]
+       "2ae226e1-8a2d-c079-a1d6-1f27c5a003bb:frag:13:2" daemon prio=10 tid=0x00007fa811ec3000 nid=0x143d waiting on condition [0x00007fa7a4799000]
+       "2ae226e1-8a2d-c079-a1d6-1f27c5a003bb:frag:9:2" daemon prio=10 tid=0x0000000004583800 nid=0x143c waiting on condition [0x00007fa7a4596000]
+       "2ae226e1-8a2d-c079-a1d6-1f27c5a003bb:frag:15:2" daemon prio=10 tid=0x00007fa81323f800 nid=0x1439 waiting on condition [0x00007fa7a4fa0000]
+       "2ae226e1-8a2d-c079-a1d6-1f27c5a003bb:frag:15:5" daemon prio=10 tid=0x0000000004582800 nid=0x1438 waiting on condition [0x00007fa7a4e9f000]
+       "2ae226e1-8a2d-c079-a1d6-1f27c5a003bb:frag:17:2" daemon prio=10 tid=0x00007fa813240800 nid=0x1435 waiting on condition [0x00007fa7a4496000]
+       "2ae226e1-f4c5-6253-e9ff-22ac1204935f:frag:3:2" daemon prio=10 tid=0x00007fa81167b800 nid=0x1431 waiting on condition [0x00007fa7aa3b3000]
+       "2ae226e1-f4c5-6253-e9ff-22ac1204935f:frag:2:2" daemon prio=10 tid=0x0000000004582000 nid=0x1430 waiting on condition [0x00007fa7a7bae000]
+       …
+       
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md b/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md
new file mode 100755
index 0000000..be1b173
--- /dev/null
+++ b/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md
@@ -0,0 +1,164 @@
+---
+title: "Using JDBC with SQuirreL on Windows"
+parent: "ODBC/JDBC Interfaces"
+---
+To use the JDBC Driver to access Drill through SQuirreL, ensure that you meet the prerequisites and follow the steps in this section.
+### Prerequisites
+
+  * SQuirreL requires JRE 7
+  * Drill installed in distributed mode on one or multiple nodes in a cluster. Refer to the [Install Drill]({{ site.baseurl }}/docs/install-drill/) documentation for more information.
+  * The client must be able to resolve the actual hostname of the Drill node(s) with the IP(s). Verify that a DNS entry was created on the client machine for the Drill node(s).
+     
+If a DNS entry does not exist, create the entry for the Drill node(s).
+
+    * For Windows, create the entry in the %WINDIR%\system32\drivers\etc\hosts file.
+    * For Linux and Mac, create the entry in /etc/hosts.  
+<drill-machine-IP> <drill-machine-hostname>
+    Example: `127.0.1.1 maprdemo`
+
+----------
+
+### Step 1: Getting the Drill JDBC Driver
+
+The Drill JDBC Driver `JAR` file must exist in a directory on your Windows
+machine in order to configure the driver in the SQuirreL client.
+
+You can copy the Drill JDBC `JAR` file from the following Drill installation
+directory on the node with Drill installed, to a directory on your Windows
+machine:
+
+    <drill_installation_directory>/jars/jdbc-driver/drill-jdbc-all-<version>.jar
+
+Or, you can download the [apache-
+drill-0.9.0.tar.gz](http://apache.osuosl.org/drill/drill-0.9.0/apache-drill-0.9.0-src.tar.gz) file to a location on your Windows machine, and
+extract the contents of the file. You may need to use a decompression utility,
+such as [7-zip](http://www.7-zip.org/) to extract the archive. Once extracted,
+you can locate the driver in the following directory:
+
+    <windows_directory>\apache-drill-<version>\jars\jdbc-driver\drill-jdbc-all-<version>.jar
+
+----------
+
+### Step 2: Installing and Starting SQuirreL
+
+To install and start SQuirreL, complete the following steps:
+
+  1. Download the SQuirreL JAR file for Windows from the following location:  
+<http://www.squirrelsql.org/#installation>
+  2. Double-click the SQuirreL `JAR` file. The SQuirreL installation wizard walks you through the installation process.
+  3. When installation completes, navigate to the SQuirreL installation folder and then double-click `squirrel-sql.bat` to start SQuirreL.
+
+----------
+
+### Step 3: Adding the Drill JDBC Driver to SQuirreL
+
+To add the Drill JDBC Driver to SQuirreL, define the driver and create a
+database alias. The alias is a specific instance of the driver configuration.
+SQuirreL uses the driver definition and alias to connect to Drill so you can
+access data sources that you have registered with Drill.
+
+#### A. Define the Driver
+
+To define the Drill JDBC Driver, complete the following steps:
+
+1. In the SQuirreL toolbar, select **Drivers > New Driver**. The Add Driver dialog box appears.
+  
+    ![drill query flow]({{ site.baseurl }}/docs/img/40.png)
+
+2. Enter the following information:
+
+    | Option           | Description                                                                                                                                                                                                          |
+    |------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+    | Name             | Name for the Drill JDBC Driver                                                                                                                                                                                       |
+    | Example URL      | jdbc:drill:zk=<zookeeper_quorum>[;schema=<schema_to_use_as_default>]Example: jdbc:drill:zk=maprdemo:5181Note: The default ZooKeeper port is 2181. In a MapR cluster, the ZooKeeper port is 5181.                     |
+    | Website URL      | jdbc:drill:zk=<zookeeper_quorum>[;schema=<schema_to_use_as_default>]Example: jdbc:drill:zk=maprdemo:5181Note: The default ZooKeeper port is 2181. In a MapR cluster, the ZooKeeper port is 5181.                     |
+    | Extra Class Path | Click Add and navigate to the JDBC JAR file location in the Windows directory:<windows_directory>/jars/jdbc-driver/drill-jdbc-all-0.6.0-incubating.jar Select the JAR file, click Open, and then click List Drivers. |
+    | Class Name       | Select org.apache.drill.jdbc.Driver from the drop-down menu.                                                                                                                                                         |
+  
+3. Click **OK**. The SQuirreL client displays a message stating that the driver registration is successful, and you can see the driver in the Drivers panel.  
+
+   ![drill query flow]({{ site.baseurl }}/docs/img/52.png)
+
+#### B. Create an Alias
+
+To create an alias, complete the following steps:
+
+1. Select the **Aliases** tab.
+2. In the SQuirreL toolbar, select **Aliases > New Alias**. The Add Alias dialog box appears.
+    
+    ![drill query flow]({{ site.baseurl }}/docs/img/19.png)
+    
+3. Enter the following information:
+  
+     <table style='table-layout:fixed;width:100%'><tbody><tr>
+     <td valign="top" width="10%"><strong>Option</strong></td>
+     <td valign="top" style='width: 500px;'><strong>Description</strong></td>
+     </tr>
+     <tr>
+     <td valign="top">Alias Name</td>
+     <td valign="top">A unique name for the Drill JDBC Driver alias.</td>
+     </tr>
+     <tr>
+     <td valign="top">Driver</td>
+     <td valign="top">Select the Drill JDBC Driver.</td>
+     </tr>
+     <tr>
+     <td valign="top">URL</td>
+     <td valign="top">Enter the connection URL with the name of the Drill directory stored in ZooKeeper and the cluster ID:
+       <code>jdbc:drill:zk=&lt;<em>zookeeper_quorum</em>&gt;/&lt;drill_directory_in_zookeeper&gt;/&lt;cluster_ID&gt;;schema=&lt;<em>schema_to_use_as_default</em>&gt;</code>
+       <em>The following examples show URLs for Drill installed on a single node:</em><br />
+       <span style="font-family: monospace;font-size: 14.0px;line-height: 1.4285715;background-color: transparent;">jdbc:drill:zk=10.10.100.56:5181/drill/demo_mapr_com-drillbits;schema=hive<br /></span>
+       <span style="font-family: monospace;font-size: 14.0px;line-height: 1.4285715;background-color: transparent;">jdbc:drill:zk=10.10.100.24:2181/drill/drillbits1;schema=hive<br /> </span>
+       <em>The following example shows a URL for Drill installed in distributed mode with a connection to a ZooKeeper quorum:</em>
+       <span style="font-family: monospace;font-size: 14.0px;line-height: 1.4285715;background-color: transparent;">jdbc:drill:zk=10.10.100.30:5181,10.10.100.31:5181,10.10.100.32:5181/drill/drillbits1;schema=hive</span>
+          <ul>
+          <li>Including a default schema is optional.</li>
+          <li>The ZooKeeper port is 2181. In a MapR cluster, the ZooKeeper port is 5181.</li>
+          <li>The Drill directory stored in ZooKeeper is <code>/drill</code>.</li>
+          <li>The Drill default cluster ID is<code> drillbits1</code>.</li>
+          </ul>
+     </td></tr><tr>
+     <td valign="top">User Name</td>
+     <td valign="top">admin</td>
+     </tr>
+     <tr>
+     <td valign="top">Password</td>
+     <td valign="top">admin</td>
+     </tr></tbody></table>
+4. Click **Ok**. The Connect to: dialog box appears.  
+
+    ![drill query flow]({{ site.baseurl }}/docs/img/30.png)
+   
+5. Click **Connect**. SQuirreL displays a message stating that the connection is successful.
+  
+    ![drill query flow]({{ site.baseurl }}/docs/img/53.png)
+     
+6. Click **OK**. SQuirreL displays a series of tabs.
+
+----------
+
+### Step 4: Running a Drill Query from SQuirreL
+
+Once you have SQuirreL successfully connected to your cluster through the
+Drill JDBC Driver, you can issue queries from the SQuirreL client. You can run
+a test query on some sample data included in the Drill installation to try out
+SQuirreL with Drill.
+
+To query sample data with SQuirreL, complete the following steps:
+
+1. Click the SQL tab.
+2. Enter the following query in the query box:   
+   
+        SELECT * FROM cp.`employee.json`;
+          
+     Example:  
+     ![drill query flow]({{ site.baseurl }}/docs/img/11.png)
+
+3. Press **Ctrl+Enter** to run the query. The following query results display: 
+  
+     ![drill query flow]({{ site.baseurl }}/docs/img/42.png) 
+
+You have successfully run a Drill query from the SQuirreL client.
+
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/odbc-jdbc-interfaces/020-using-jdbc.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/020-using-jdbc.md b/_docs/odbc-jdbc-interfaces/020-using-jdbc.md
deleted file mode 100755
index d62a5a0..0000000
--- a/_docs/odbc-jdbc-interfaces/020-using-jdbc.md
+++ /dev/null
@@ -1,164 +0,0 @@
----
-title: "Using JDBC"
-parent: "ODBC/JDBC Interfaces"
----
-To use the JDBC Driver to access Drill through Squirrel, ensure that you meet the prerequisites and follow the steps in this section.
-### Prerequisites
-
-  * SQuirreL requires JRE 7
-  * Drill installed in distributed mode on one or multiple nodes in a cluster. Refer to the [Install Drill]({{ site.baseurl }}/docs/install-drill/) documentation for more information.
-  * The client must be able to resolve the actual hostname of the Drill node(s) with the IP(s). Verify that a DNS entry was created on the client machine for the Drill node(s).
-     
-If a DNS entry does not exist, create the entry for the Drill node(s).
-
-    * For Windows, create the entry in the %WINDIR%\system32\drivers\etc\hosts file.
-    * For Linux and Mac, create the entry in /etc/hosts.  
-<drill-machine-IP> <drill-machine-hostname>
-    Example: `127.0.1.1 maprdemo`
-
-----------
-
-### Step 1: Getting the Drill JDBC Driver
-
-The Drill JDBC Driver `JAR` file must exist in a directory on your Windows
-machine in order to configure the driver in the SQuirreL client.
-
-You can copy the Drill JDBC `JAR` file from the following Drill installation
-directory on the node with Drill installed, to a directory on your Windows
-machine:
-
-    <drill_installation_directory>/jars/jdbc-driver/drill-jdbc-all-<version>.jar
-
-Or, you can download the [apache-
-drill-0.9.0.tar.gz](http://apache.osuosl.org/drill/drill-0.9.0/apache-drill-0.9.0-src.tar.gz) file to a location on your Windows machine, and
-extract the contents of the file. You may need to use a decompression utility,
-such as [7-zip](http://www.7-zip.org/) to extract the archive. Once extracted,
-you can locate the driver in the following directory:
-
-    <windows_directory>\apache-drill-<version>\jars\jdbc-driver\drill-jdbc-all-<version>.jar
-
-----------
-
-### Step 2: Installing and Starting SQuirreL
-
-To install and start SQuirreL, complete the following steps:
-
-  1. Download the SQuirreL JAR file for Windows from the following location:  
-<http://www.squirrelsql.org/#installation>
-  2. Double-click the SQuirreL `JAR` file. The SQuirreL installation wizard walks you through the installation process.
-  3. When installation completes, navigate to the SQuirreL installation folder and then double-click `squirrel-sql.bat` to start SQuirreL.
-
-----------
-
-### Step 3: Adding the Drill JDBC Driver to SQuirreL
-
-To add the Drill JDBC Driver to SQuirreL, define the driver and create a
-database alias. The alias is a specific instance of the driver configuration.
-SQuirreL uses the driver definition and alias to connect to Drill so you can
-access data sources that you have registered with Drill.
-
-#### A. Define the Driver
-
-To define the Drill JDBC Driver, complete the following steps:
-
-1. In the SQuirreL toolbar, select **Drivers > New Driver**. The Add Driver dialog box appears.
-  
-    ![drill query flow]({{ site.baseurl }}/docs/img/40.png)
-
-2. Enter the following information:
-
-    | Option           | Description                                                                                                                                                                                                          |
-    |------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-    | Name             | Name for the Drill JDBC Driver                                                                                                                                                                                       |
-    | Example URL      | jdbc:drill:zk=<zookeeper_quorum>[;schema=<schema_to_use_as_default>]Example: jdbc:drill:zk=maprdemo:5181Note: The default ZooKeeper port is 2181. In a MapR cluster, the ZooKeeper port is 5181.                     |
-    | Website URL      | jdbc:drill:zk=<zookeeper_quorum>[;schema=<schema_to_use_as_default>]Example: jdbc:drill:zk=maprdemo:5181Note: The default ZooKeeper port is 2181. In a MapR cluster, the ZooKeeper port is 5181.                     |
-    | Extra Class Path | Click Add and navigate to the JDBC JAR file location in the Windows directory:<windows_directory>/jars/jdbc-driver/drill-jdbc-all-0.6.0-incubating.jar Select the JAR file, click Open, and then click List Drivers. |
-    | Class Name       | Select org.apache.drill.jdbc.Driver from the drop-down menu.                                                                                                                                                         |
-  
-3. Click **OK**. The SQuirreL client displays a message stating that the driver registration is successful, and you can see the driver in the Drivers panel.  
-
-   ![drill query flow]({{ site.baseurl }}/docs/img/52.png)
-
-#### B. Create an Alias
-
-To create an alias, complete the following steps:
-
-1. Select the **Aliases** tab.
-2. In the SQuirreL toolbar, select **Aliases >****New Alias**. The Add Alias dialog box appears.
-    
-    ![drill query flow]({{ site.baseurl }}/docs/img/19.png)
-    
-3. Enter the following information:
-  
-     <table style='table-layout:fixed;width:100%'><tbody><tr>
-     <td valign="top" width="10%"><strong>Option</strong></td>
-     <td valign="top" style='width: 500px;'><strong>Description</strong></td>
-     </tr>
-     <tr>
-     <td valign="top">Alias Name</td>
-     <td valign="top">A unique name for the Drill JDBC Driver alias.</td>
-     </tr>
-     <tr>
-     <td valign="top">Driver</td>
-     <td valign="top">Select the Drill JDBC Driver.</td>
-     </tr>
-     <tr>
-     <td valign="top">URL</td>
-     <td valign="top">Enter the connection URL with the name of the Drill directory stored in ZooKeeper and the cluster ID:
-       <code>jdbc:drill:zk=&lt;<em>zookeeper_quorum</em>&gt;/&lt;drill_directory_in_zookeeper&gt;/&lt;cluster_ID&gt;;schema=&lt;<em>schema_to_use_as_default</em>&gt;</code>
-       <em>The following examples show URLs for Drill installed on a single node:</em><br />
-       <span style="font-family: monospace;font-size: 14.0px;line-height: 1.4285715;background-color: transparent;">jdbc:drill:zk=10.10.100.56:5181/drill/demo_mapr_com-drillbits;schema=hive<br /></span>
-       <span style="font-family: monospace;font-size: 14.0px;line-height: 1.4285715;background-color: transparent;">jdbc:drill:zk=10.10.100.24:2181/drill/drillbits1;schema=hive<br /> </span>
-       <em>The following example shows a URL for Drill installed in distributed mode with a connection to a ZooKeeper quorum:</em>
-       <span style="font-family: monospace;font-size: 14.0px;line-height: 1.4285715;background-color: transparent;">jdbc:drill:zk=10.10.100.30:5181,10.10.100.31:5181,10.10.100.32:5181/drill/drillbits1;schema=hive</span>
-          <ul>
-          <li>Including a default schema is optional.</li>
-          <li>The ZooKeeper port is 2181. In a MapR cluster, the ZooKeeper port is 5181.</li>
-          <li>The Drill directory stored in ZooKeeper is <code>/drill</code>.</li>
-          <li>The Drill default cluster ID is<code> drillbits1</code>.</li>
-          </ul>
-     </td></tr><tr>
-     <td valign="top">User Name</td>
-     <td valign="top">admin</td>
-     </tr>
-     <tr>
-     <td valign="top">Password</td>
-     <td valign="top">admin</td>
-     </tr></tbody></table>
-4. Click **Ok**. The Connect to: dialog box appears.  
-
-    ![drill query flow]({{ site.baseurl }}/docs/img/30.png)
-   
-5. Click **Connect**. SQuirreL displays a message stating that the connection is successful.
-  
-    ![drill query flow]({{ site.baseurl }}/docs/img/53.png)
-     
-6. Click **OK**. SQuirreL displays a series of tabs.
-
-----------
-
-### Step 4: Running a Drill Query from SQuirreL
-
-Once you have SQuirreL successfully connected to your cluster through the
-Drill JDBC Driver, you can issue queries from the SQuirreL client. You can run
-a test query on some sample data included in the Drill installation to try out
-SQuirreL with Drill.
-
-To query sample data with Squirrel, complete the following steps:
-
-1. Click the SQL tab.
-2. Enter the following query in the query box:   
-   
-        SELECT * FROM cp.`employee.json`;
-          
-     Example:  
-     ![drill query flow]({{ site.baseurl }}/docs/img/11.png)
-
-3. Press **Ctrl+Enter** to run the query. The following query results display: 
-  
-     ![drill query flow]({{ site.baseurl }}/docs/img/42.png) 
-
-You have successfully run a Drill query from the SQuirreL client.
-
-
-

http://git-wip-us.apache.org/repos/asf/drill/blob/134b6ff8/_docs/sql-reference/sql-functions/073-log-and-debug.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/073-log-and-debug.md b/_docs/sql-reference/sql-functions/073-log-and-debug.md
new file mode 100644
index 0000000..142d6b6
--- /dev/null
+++ b/_docs/sql-reference/sql-functions/073-log-and-debug.md
@@ -0,0 +1,3 @@
+---
+title: "Log and Debug"
+---
\ No newline at end of file


[11/31] drill git commit: minor edit

Posted by ts...@apache.org.
minor edit


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/e3b2c1a3
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/e3b2c1a3
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/e3b2c1a3

Branch: refs/heads/gh-pages
Commit: e3b2c1a319e3c826db4797873cf45b03a3ff46ac
Parents: f61eaf9
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 19:46:23 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 19:46:23 2015 -0700

----------------------------------------------------------------------
 _docs/sql-reference/sql-functions/020-data-type-conversion.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/e3b2c1a3/_docs/sql-reference/sql-functions/020-data-type-conversion.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/020-data-type-conversion.md b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
index a6ec16e..c60c096 100644
--- a/_docs/sql-reference/sql-functions/020-data-type-conversion.md
+++ b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
@@ -30,7 +30,7 @@ If the SELECT statement includes a WHERE clause that compares a column of an unk
 
     SELECT c_row, CAST(c_int AS DECIMAL(28,8)) FROM mydata WHERE CAST(c_int AS DECIMAL(28,8)) > -3.0;
 
-{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to `true`.{% include endnote.html %}
+{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to true.{% include endnote.html %}
 
 Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types with one exception: When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the VARBINARY source/destination, use CAST.  
 
@@ -56,7 +56,7 @@ The following example shows how to cast a character to a DECIMAL having two deci
     | 1.00       |
     +------------+
 
-{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to `true`.{% include endnote.html %}
+{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to true.{% include endnote.html %}
 
 ### Casting a Number to a Character String
 The first example shows Drill casting a number to a VARCHAR having a length of 3 bytes: The result is a 3-character string, 456. Drill supports the CHAR and CHARACTER VARYING alias.
@@ -89,7 +89,7 @@ Cast an integer to a decimal.
     +------------+
     1 row selected (0.08 seconds)
 
-{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to `true`.{% include endnote.html %}
+{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to true.{% include endnote.html %}
 
 ### Casting Intervals
 


[15/31] drill git commit: fix links

Posted by ts...@apache.org.
fix links


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/5f6a51af
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/5f6a51af
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/5f6a51af

Branch: refs/heads/gh-pages
Commit: 5f6a51af253b4fe5fec2cd80705fe74b985d31b5
Parents: 5a7f700
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 01:26:33 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 01:26:33 2015 -0700

----------------------------------------------------------------------
 .../090-mongodb-plugin-for-apache-drill.md      |  26 +--
 ...ata-sources-and-file-formats-introduction.md |   2 +-
 .../030-deploying-and-using-a-hive-udf.md       |   2 +-
 .../040-parquet-format.md                       |   4 +-
 .../050-json-data-model.md                      |  14 +-
 .../020-develop-a-simple-function.md            |   4 +-
 .../030-developing-an-aggregate-function.md     |   4 +-
 _docs/img/18.png                                | Bin 22253 -> 18137 bytes
 .../030-starting-drill-on-linux-and-mac-os-x.md |   2 +-
 ...microstrategy-analytics-with-apache-drill.md |   4 +-
 _docs/query-data/010-query-data-introduction.md |  14 +-
 _docs/query-data/030-querying-hbase.md          |  41 ++--
 _docs/query-data/050-querying-hive.md           |   2 +-
 .../060-querying-the-information-schema.md      |   2 +-
 _docs/query-data/070-query-sys-tbl.md           |  89 ++++----
 .../010-querying-json-files.md                  |  33 +--
 .../020-querying-parquet-files.md               | 100 +++++----
 .../030-querying-plain-text-files.md            |  72 +++----
 .../040-querying-directories.md                 |  34 +--
 .../005-querying-complex-data-introduction.md   |   4 +-
 _docs/sql-reference/090-sql-extensions.md       |   8 +-
 .../data-types/010-supported-data-types.md      |  20 +-
 .../nested-data-functions/010-flatten.md        |   8 +-
 .../nested-data-functions/020-kvgen.md          |   5 +-
 .../sql-functions/010-math-and-trig.md          |  14 +-
 .../sql-functions/020-data-type-conversion.md   | 211 ++++++++++---------
 .../030-date-time-functions-and-arithmetic.md   | 186 ++++++++--------
 .../sql-functions/040-string-manipulation.md    | 143 ++++++-------
 .../050-aggregate-and-aggregate-statistical.md  |   8 +-
 _docs/tutorials/010-tutorials-introduction.md   |   6 +-
 _docs/tutorials/020-drill-in-10-minutes.md      |   2 +-
 .../030-analyzing-the-yelp-academic-dataset.md  |  13 +-
 .../050-analyzing-highly-dynamic-datasets.md    |   8 +-
 .../020-getting-to-know-the-drill-sandbox.md    |   2 -
 34 files changed, 528 insertions(+), 559 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md b/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
index ff1c736..72bdbeb 100644
--- a/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
+++ b/_docs/connect-a-data-source/090-mongodb-plugin-for-apache-drill.md
@@ -85,19 +85,19 @@ Reference]({{ site.baseurl }}/docs/sql-reference).
 **Example 1: View mongo.zipdb Dataset**
 
     0: jdbc:drill:zk=local> SELECT * FROM zipcodes LIMIT 10;
-+------------------------------------------------------------------------------------------------+
-|                                           *                                                    |
-+------------------------------------------------------------------------------------------------+
-| { "city" : "AGAWAM" , "loc" : [ -72.622739 , 42.070206] , "pop" : 15338 , "state" : "MA"}      |
-| { "city" : "CUSHMAN" , "loc" : [ -72.51565 , 42.377017] , "pop" : 36963 , "state" : "MA"}      |
-| { "city" : "BARRE" , "loc" : [ -72.108354 , 42.409698] , "pop" : 4546 , "state" : "MA"}        |
-| { "city" : "BELCHERTOWN" , "loc" : [ -72.410953 , 42.275103] , "pop" : 10579 , "state" : "MA"} |
-| { "city" : "BLANDFORD" , "loc" : [ -72.936114 , 42.182949] , "pop" : 1240 , "state" : "MA"}    |
-| { "city" : "BRIMFIELD" , "loc" : [ -72.188455 , 42.116543] , "pop" : 3706 , "state" : "MA"}    |
-| { "city" : "CHESTER" , "loc" : [ -72.988761 , 42.279421] , "pop" : 1688 , "state" : "MA"}      |
-| { "city" : "CHESTERFIELD" , "loc" : [ -72.833309 , 42.38167] , "pop" : 177 , "state" : "MA"}   |
-| { "city" : "CHICOPEE" , "loc" : [ -72.607962 , 42.162046] , "pop" : 23396 , "state" : "MA"}    |
-| { "city" : "CHICOPEE" , "loc" : [ -72.576142 , 42.176443] , "pop" : 31495 , "state" : "MA"}    |
+    +------------------------------------------------------------------------------------------------+
+    |                                           *                                                    |
+    +------------------------------------------------------------------------------------------------+
+    | { "city" : "AGAWAM" , "loc" : [ -72.622739 , 42.070206] , "pop" : 15338 , "state" : "MA"}      |
+    | { "city" : "CUSHMAN" , "loc" : [ -72.51565 , 42.377017] , "pop" : 36963 , "state" : "MA"}      |
+    | { "city" : "BARRE" , "loc" : [ -72.108354 , 42.409698] , "pop" : 4546 , "state" : "MA"}        |
+    | { "city" : "BELCHERTOWN" , "loc" : [ -72.410953 , 42.275103] , "pop" : 10579 , "state" : "MA"} |
+    | { "city" : "BLANDFORD" , "loc" : [ -72.936114 , 42.182949] , "pop" : 1240 , "state" : "MA"}    |
+    | { "city" : "BRIMFIELD" , "loc" : [ -72.188455 , 42.116543] , "pop" : 3706 , "state" : "MA"}    |
+    | { "city" : "CHESTER" , "loc" : [ -72.988761 , 42.279421] , "pop" : 1688 , "state" : "MA"}      |
+    | { "city" : "CHESTERFIELD" , "loc" : [ -72.833309 , 42.38167] , "pop" : 177 , "state" : "MA"}   |
+    | { "city" : "CHICOPEE" , "loc" : [ -72.607962 , 42.162046] , "pop" : 23396 , "state" : "MA"}    |
+    | { "city" : "CHICOPEE" , "loc" : [ -72.576142 , 42.176443] , "pop" : 31495 , "state" : "MA"}    |
 
 **Example 2: Aggregation**
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md b/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
index d758a50..d468c40 100644
--- a/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
+++ b/_docs/data-sources-and-file-formats/010-data-sources-and-file-formats-introduction.md
@@ -22,4 +22,4 @@ Drill supports the following input formats for data:
 
 You set the input format for data coming from data sources to Drill in the workspace portion of the [storage plugin]({{ site.baseurl }}/docs/storage-plugin-registration) definition. The default input format in Drill is Parquet. 
 
-You change the [sys.options table]({{ site.baseurl }}/docs/planning-and-execution-options) to set the output format of Drill data. The default storage format for Drill CREATE TABLE AS (CTAS) statements is Parquet.
\ No newline at end of file
+You change one of the `store` properties in the [sys.options table]({{ site.baseurl }}/docs/configuration-options-introduction/) to set the output format of Drill data. The default storage format for Drill CREATE TABLE AS (CTAS) statements is Parquet.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md b/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
index 6a26376..2cc0db0 100644
--- a/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
+++ b/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
@@ -21,7 +21,7 @@ After you export the custom UDF as a JAR, perform the UDF setup tasks so Drill c
  
 To set up the UDF:
 
-1. Register Hive. [Register a Hive storage plugin]({{ site.baseurl }}/docs/registering-hive/) that connects Drill to a Hive data source.
+1. Register Hive. [Register a Hive storage plugin]({{ site.baseurl }}/docs/hive-storage-plugin/) that connects Drill to a Hive data source.
 2. Add the JAR for the UDF to the Drill CLASSPATH. In earlier versions of Drill, place the JAR file in the `/jars/3rdparty` directory of the Drill installation on all nodes running a Drillbit.
 3. On each Drill node in the cluster, restart the Drillbit.
    `<drill installation directory>/bin/drillbit.sh restart`

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/data-sources-and-file-formats/040-parquet-format.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/040-parquet-format.md b/_docs/data-sources-and-file-formats/040-parquet-format.md
index ca8b164..5cfc83f 100644
--- a/_docs/data-sources-and-file-formats/040-parquet-format.md
+++ b/_docs/data-sources-and-file-formats/040-parquet-format.md
@@ -48,14 +48,14 @@ To maximize performance, set the target size of a Parquet row group to the numbe
 The default block size is 536870912 bytes.
 
 ### Type Mapping
-The high correlation between Parquet and SQL data types makes reading Parquet files effortless in Drill. Writing to Parquet files takes more work than reading. Because SQL does not support all Parquet data types, to prevent Drill from inferring a type other than one you want, use the [cast function] ({{ site.baseurl }}/docs/sql-functions) Drill offers more liberal casting capabilities than SQL for Parquet conversions if the Parquet data is of a logical type. 
+The high correlation between Parquet and SQL data types makes reading Parquet files effortless in Drill. Writing to Parquet files takes more work than reading. Because SQL does not support all Parquet data types, to prevent Drill from inferring a type other than one you want, use the [cast function]({{ site.baseurl }}/docs/data-type-conversion/#cast). Drill offers more liberal casting capabilities than SQL for Parquet conversions if the Parquet data is of a logical type. 
 
 The following general process converts a file from JSON to Parquet:
 
 * Create or use an existing storage plugin that specifies the storage location of the Parquet file, mutability of the data, and supported file formats.
 * Take a look at the JSON data. 
 * Create a table that selects the JSON file.
-* In the CTAS command, cast JSON string data to corresponding [SQL types]({{ site.baseurl }}/docs/json-data-model/data-type-mapping).
+* In the CTAS command, cast JSON string data to corresponding [SQL types]({{ site.baseurl }}/docs/json-data-model/#data-type-mapping).
 
 ### Example: Read JSON, Write Parquet
 This example demonstrates a storage plugin definition, a sample row of data from a JSON file, and a Drill query that writes the JSON input to Parquet output. 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/data-sources-and-file-formats/050-json-data-model.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/050-json-data-model.md b/_docs/data-sources-and-file-formats/050-json-data-model.md
index 90b69a1..548b709 100644
--- a/_docs/data-sources-and-file-formats/050-json-data-model.md
+++ b/_docs/data-sources-and-file-formats/050-json-data-model.md
@@ -53,15 +53,15 @@ Set the `store.json.read_numbers_as_double` property to true.
 
 When you set this option, Drill reads all numbers from the JSON files as DOUBLE. After reading the data, use a SELECT statement in Drill to cast data as follows:
 
-* Cast JSON values to [SQL types]({{ site.baseurl }}/docs/data-types), such as BIGINT, FLOAT, and INTEGER.
-* Cast JSON strings to [Drill Date/Time Data Type Formats]({{ site.baseurl }}/docs/supported-date-time-data-type-formats).
+* Cast JSON values to [SQL types]({{ site.baseurl }}/docs/json-data-model/#data-type-mapping), such as BIGINT, FLOAT, and INTEGER.
+* Cast JSON strings to [Drill Date/Time Data Type Formats]({{ site.baseurl }}/docs/date-time-and-timestamp).
 
-Drill uses [map and array data types]({{ site.baseurl }}/docs/data-types) internally for reading complex and nested data structures from JSON. You can cast data in a map or array of data to return a value from the structure, as shown in [“Create a view on a MapR-DB table”] ({{ site.baseurl }}/docs/lesson-2-run-queries-with-ansi-sql). [“Query Complex Data”]({{ site.baseurl }}/docs/querying-complex-data-introduction) shows how to access nested arrays.
+Drill uses [map and array data types]({{ site.baseurl }}/docs/handling-different-data-types/#handling-json-and-parquet-data) internally for reading complex and nested data structures from JSON. You can cast data in a map or array of data to return a value from the structure, as shown in [“Create a view on a MapR-DB table”]({{ site.baseurl }}/docs/lesson-2-run-queries-with-ansi-sql/#create-a-view-on-a-mapr-db-table). [“Query Complex Data”]({{ site.baseurl }}/docs/querying-complex-data-introduction) shows how to access nested arrays.
 
 ## Reading JSON
-To read JSON data using Drill, use a [file system storage plugin]({{ site.baseurl }}/docs/connect-to-a-data-source) that defines the JSON format. You can use the `dfs` storage plugin, which includes the definition. 
+To read JSON data using Drill, use a [file system storage plugin]({{ site.baseurl }}/docs/file-system-storage-plugin/) that defines the JSON format. You can use the `dfs` storage plugin, which includes the definition. 
 
-JSON data is often complex. Data can be deeply nested and semi-structured. but [you can use workarounds ]({{ site.baseurl }}/docs/json-data-model#limitations-and-workaroumds) covered later.
+JSON data is often complex. Data can be deeply nested and semi-structured, but you can use [workarounds]({{ site.baseurl }}/docs/json-data-model/#limitations-and-workarounds) covered later.
 
 Drill reads tuples defined in single objects, having no comma between objects. A JSON object is an unordered set of name/value pairs. Curly braces delimit objects in the JSON file:
 
@@ -310,7 +310,7 @@ To access the second geometry coordinate of the first city lot in the San Franci
     +-------------------+
     1 row selected (0.19 seconds)
 
-More examples of drilling down into an array are shown in ["Selecting Nested Data for a Column"]({{ site.baseurl }}/docs/query-3-selecting-nested-data-for-a-column). 
+More examples of drilling down into an array are shown in ["Selecting Nested Data for a Column"]({{ site.baseurl }}/docs/selecting-nested-data-for-a-column). 
 
 ### Example: Flatten an Array of Maps using a Subquery
 By flattening the following JSON file, which contains an array of maps, you can evaluate the records of the flattened data. 
@@ -449,7 +449,7 @@ Workaround: Separate lengthy objects into objects delimited by curly braces usin
  
 * [FLATTEN]({{ site.baseurl }}/docs/json-data-model#flatten-json-data) separates a set of nested JSON objects into individual rows in a DRILL table.
 
-* [KVGEN]({{ site.baseurl }}/docs/json-data-model#generate-key-value-pairs) separates objects having more elements than optimal for querying.
+* [KVGEN]({{ site.baseurl }}/docs/kvgen/) separates objects having more elements than optimal for querying.
 
   
 ### Nested Column Names 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/develop-custom-functions/020-develop-a-simple-function.md
----------------------------------------------------------------------
diff --git a/_docs/develop-custom-functions/020-develop-a-simple-function.md b/_docs/develop-custom-functions/020-develop-a-simple-function.md
index cdc1876..094f0c1 100644
--- a/_docs/develop-custom-functions/020-develop-a-simple-function.md
+++ b/_docs/develop-custom-functions/020-develop-a-simple-function.md
@@ -4,8 +4,8 @@ parent: "Develop Custom Functions"
 ---
 Create a class within a Java package that implements Drill’s simple interface
 into the program, and include the required information for the function type.
-Your function must include data types that Drill supports, such as int or
-BigInt. For a list of supported data types, refer to the [SQL Reference]({{ site.baseurl }}/docs/sql-reference).
+Your function must include data types that Drill supports, such as INTEGER or
+BIGINT. For a list of supported data types, refer to the [SQL Reference]({{ site.baseurl }}/docs/supported-data-types/).
 
 Complete the following steps to develop a simple function using Drill’s simple
 function interface:

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
----------------------------------------------------------------------
diff --git a/_docs/develop-custom-functions/030-developing-an-aggregate-function.md b/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
index 520c044..3368c24 100644
--- a/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
+++ b/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
@@ -4,8 +4,8 @@ parent: "Develop Custom Functions"
 ---
 Create a class within a Java package that implements Drill’s aggregate
 interface into the program. Include the required information for the function.
-Your function must include data types that Drill supports, such as int or
-BigInt. For a list of supported data types, refer to the [SQL Reference]({{ site.baseurl }}/docs/sql-reference).
+Your function must include data types that Drill supports, such as INTEGER or
+BIGINT. For a list of supported data types, refer to the [SQL Reference]({{ site.baseurl }}/docs/supported-data-types/).
 
 Complete the following steps to create an aggregate function:
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/img/18.png
----------------------------------------------------------------------
diff --git a/_docs/img/18.png b/_docs/img/18.png
index 691b816..ac5b802 100644
Binary files a/_docs/img/18.png and b/_docs/img/18.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md b/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
index cdbdf20..697f425 100644
--- a/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
+++ b/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
@@ -14,7 +14,7 @@ Start the Drill shell using the `drill-embedded` command. The command uses a jdb
 
    The `0: jdbc:drill:zk=local>`  prompt appears.  
 
-   At this point, you can [run queries]({{site.baseurl}}/docs/drill-in-10-minutes#query-sample-data).
+   At this point, you can [run queries]({{site.baseurl}}/docs/query-data).
 
 You can also use the **sqlline** command to start Drill using a custom connection string, as described in ["Using an Ad-Hoc Connection to Drill"](docs/starting-drill-in-distributed-mode/#using-an-ad-hoc-connection-to-drill). For example, you can specify the storage plugin when you start the shell. Doing so eliminates the need to specify the storage plugin in the query: For example, this command specifies the `dfs` storage plugin.
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/odbc-jdbc-interfaces/050-using-microstrategy-analytics-with-apache-drill.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/050-using-microstrategy-analytics-with-apache-drill.md b/_docs/odbc-jdbc-interfaces/050-using-microstrategy-analytics-with-apache-drill.md
index cdade1c..680e139 100755
--- a/_docs/odbc-jdbc-interfaces/050-using-microstrategy-analytics-with-apache-drill.md
+++ b/_docs/odbc-jdbc-interfaces/050-using-microstrategy-analytics-with-apache-drill.md
@@ -98,9 +98,7 @@ You can now use MicroStrategy Analytics Enterprise to access Drill as a database
 This step includes an example scenario that shows you how to use MicroStrategy, with Drill as the database instance, to analyze Twitter data stored as complex JSON documents. 
 
 ####Scenario
-The Drill distributed file system plugin is configured to read Twitter data in a directory structure. A view is created in Drill to capture the most relevant maps and nested maps and arrays for the Twitter JSON documents. Refer to the following page for more information about how to configure and use Drill to work with complex data:
-
-https://cwiki.apache.org/confluence/display/DRILL/Query+Data
+The Drill distributed file system plugin is configured to read Twitter data in a directory structure. A view is created in Drill to capture the most relevant maps and nested maps and arrays for the Twitter JSON documents. Refer to [Query Data](/docs/query-data-introduction/) for more information about how to configure and use Drill to work with complex data:
 
 ####Part 1: Create a Project
 Complete the following steps to create a project:

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/010-query-data-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/010-query-data-introduction.md b/_docs/query-data/010-query-data-introduction.md
index 708a2b7..980c975 100644
--- a/_docs/query-data/010-query-data-introduction.md
+++ b/_docs/query-data/010-query-data-introduction.md
@@ -3,11 +3,12 @@ title: "Query Data Introduction"
 parent: "Query Data"
 ---
 You can query local and distributed file systems, Hive, and HBase data sources
-registered with Drill. If you connect directly to a particular schema when
-you invoke SQLLine, you can issue SQL queries against that schema. If you d0
-not indicate a schema when you invoke SQLLine, you can issue the `USE
-<schema>` statement to run your queries against a particular schema. After you
-issue the `USE` statement, you can use absolute notation, such as `schema.table.column`.
+registered with Drill. You issue the `USE
+<storage plugin>` statement to run your queries against a particular storage plugin. You use dot notation and back ticks to specify the storage plugin name and sometimes the workspace name. For example, to use the dfs storage plugin and default workspace, issue this command: ``USE dfs.`default``
+
+Alternatively, you can omit the USE statement, and specify the storage plugin and workspace name using dot notation and back ticks. For example:
+
+``dfs.`default`.`/Users/drill-user/apache-drill-1.0.0/log/sqlline_queries.json```;
 
 You may need to use casting functions in some queries. For example, you may
 have to cast a string `"100"` to an integer in order to apply a math function
@@ -23,9 +24,6 @@ text may help you isolate the problem.
 The set command increases the default text display (number of characters). By
 default, most of the plan output is hidden.
 
-You may see errors if you try to use non-standard or unsupported SQL syntax in
-a query.
-
 Remember the following tips when querying data with Drill:
 
   * Include a semicolon at the end of SQL statements, except when you issue a command with an exclamation point `(!).   

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/030-querying-hbase.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/030-querying-hbase.md b/_docs/query-data/030-querying-hbase.md
index bb8cc59..7febf42 100644
--- a/_docs/query-data/030-querying-hbase.md
+++ b/_docs/query-data/030-querying-hbase.md
@@ -2,7 +2,7 @@
 title: "Querying HBase"
 parent: "Query Data"
 ---
-This exercise creates two tables in HBase, students and clicks, that you can query with Drill. As an HBase user, you most likely are running Drill in distributed mode, in which Drill might start as a service. If you are not an HBase user and just kicking the tires, you might use the Drill Sandbox on a single-node cluster (embedded mode). In this case, you need to [start Drill]({{ site.baseurl }}/docs/install-drill/) before performing step 5 of this exercise. On the Drill Sandbox, HBase tables you create will be located in: /mapr/demo.mapr.com/tables
+This exercise creates two tables in HBase, students and clicks, that you can query with Drill. As an HBase user, you most likely are running Drill in distributed mode, in which Drill might start as a service. If you are not an HBase user and just kicking the tires, you might use the Drill Sandbox on a single-node cluster (embedded mode). In this case, you need to [start Drill]({{ site.baseurl }}/docs/install-drill/) before performing step 5 of this exercise. On the Drill Sandbox, HBase tables you create will be located in: `/mapr/demo.mapr.com/tables`
 
 You use the CONVERT_TO and CONVERT_FROM functions to convert binary text to readable output. You use the CAST function to convert the binary INT to readable output in step 4 of [Query HBase Tables]({{site.baseurl}}/docs/querying-hbase/#query-hbase-tables). When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the VARBINARY source/destination, use CAST.
 
@@ -99,15 +99,16 @@ The `maprdb` format plugin provides access to the `/tables` directory. Use Drill
        SELECT * FROM students;
    The query returns binary results:
   
-        +------------+------------+------------+
-        |  row_key   |  account   |  address   |
-        +------------+------------+------------+
-        | [B@e6d9eb7 | {"name":"QWxpY2U="} | {"state":"Q0E=","street":"MTIzIEJhbGxtZXIgQXY=","zipcode":"MTIzNDU="} |
-        | [B@2823a2b4 | {"name":"Qm9i"} | {"state":"Q0E=","street":"MSBJbmZpbml0ZSBMb29w","zipcode":"MTIzNDU="} |
-        | [B@3b8eec02 | {"name":"RnJhbms="} | {"state":"Q0E=","street":"NDM1IFdhbGtlciBDdA==","zipcode":"MTIzNDU="} |
-        | [B@242895da | {"name":"TWFyeQ=="} | {"state":"Q0E=","street":"NTYgU291dGhlcm4gUGt3eQ==","zipcode":"MTIzNDU="} |
-        +------------+------------+------------+
+        +-------------+-----------------------+---------------------------------------------------------------------------+
+        |  row_key    |  account              |                                address                                    |
+        +-------------+-----------------------+---------------------------------------------------------------------------+
+        | [B@e6d9eb7  | {"name":"QWxpY2U="}   | {"state":"Q0E=","street":"MTIzIEJhbGxtZXIgQXY=","zipcode":"MTIzNDU="}     |
+        | [B@2823a2b4 | {"name":"Qm9i"}       | {"state":"Q0E=","street":"MSBJbmZpbml0ZSBMb29w","zipcode":"MTIzNDU="}     |
+        | [B@3b8eec02 | {"name":"RnJhbms="}   | {"state":"Q0E=","street":"NDM1IFdhbGtlciBDdA==","zipcode":"MTIzNDU="}     |
+        | [B@242895da | {"name":"TWFyeQ=="}   | {"state":"Q0E=","street":"NTYgU291dGhlcm4gUGt3eQ==","zipcode":"MTIzNDU="} |
+        +-------------+-----------------------+---------------------------------------------------------------------------+
         4 rows selected (1.335 seconds)
+
    The Drill output reflects the actual data type of the HBase data, which is binary.
 
 2. Issue the following query, that includes the CONVERT_FROM function, to convert the `students` table to readable data:
@@ -124,14 +125,14 @@ The `maprdb` format plugin provides access to the `/tables` directory. Use Drill
 
     The query returns readable data:
 
-        +------------+------------+------------+------------+------------+
-        | studentid  |    name    |   state    |   street   |  zipcode   |
-        +------------+------------+------------+------------+------------+
-        | student1   | Alice      | CA         | 123 Ballmer Av | 12345      |
-        | student2   | Bob        | CA         | 1 Infinite Loop | 12345      |
-        | student3   | Frank      | CA         | 435 Walker Ct | 12345      |
+        +------------+------------+------------+------------------+------------+
+        | studentid  |    name    |   state    |       street     |  zipcode   |
+        +------------+------------+------------+------------------+------------+
+        | student1   | Alice      | CA         | 123 Ballmer Av   | 12345      |
+        | student2   | Bob        | CA         | 1 Infinite Loop  | 12345      |
+        | student3   | Frank      | CA         | 435 Walker Ct    | 12345      |
         | student4   | Mary       | CA         | 56 Southern Pkwy | 12345      |
-        +------------+------------+------------+------------+------------+
+        +------------+------------+------------+------------------+------------+
         4 rows selected (0.504 seconds)
 
 3. Query the clicks table to see which students visited google.com:
@@ -142,13 +143,13 @@ The `maprdb` format plugin provides access to the `/tables` directory. Use Drill
                CONVERT_FROM(clicks.clickinfo.url, 'UTF8') AS url 
         FROM clicks WHERE clicks.clickinfo.url LIKE '%google%'; 
 
-        +------------+------------+------------+------------+
-        |  clickid   | studentid  |    time    |    url     |
-        +------------+------------+------------+------------+
+        +------------+------------+--------------------------+-----------------------+
+        |  clickid   | studentid  |           time           |         url           |
+        +------------+------------+--------------------------+-----------------------+
         | click1     | student1   | 2014-01-01 12:01:01.0001 | http://www.google.com |
         | click3     | student2   | 2014-01-01 01:02:01.0001 | http://www.google.com |
         | click6     | student3   | 2013-02-01 12:01:01.0001 | http://www.google.com |
-        +------------+------------+------------+------------+
+        +------------+------------+--------------------------+-----------------------+
         3 rows selected (0.294 seconds)
 
 4. Query the clicks table to get the studentid of the student having 100 items. Use CONVERT_FROM to convert the textual studentid and itemtype data, but use CAST to convert the integer quantity.

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/050-querying-hive.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/050-querying-hive.md b/_docs/query-data/050-querying-hive.md
index 080492f..515200a 100644
--- a/_docs/query-data/050-querying-hive.md
+++ b/_docs/query-data/050-querying-hive.md
@@ -18,7 +18,7 @@ To create a Hive table and query it with Drill, complete the following steps:
 
         hive> load data local inpath '/<directory path>/customers.csv' overwrite into table customers;`
   4. Issue `quit` or `exit` to leave the Hive shell.
-  5. Start Drill. Refer to [/docs/install-drill) for instructions.
+  5. Start the Drill shell. 
   6. Issue the following query to Drill to get the first and last names of the first ten customers in the Hive table:  
 
         0: jdbc:drill:schema=hiveremote> SELECT firstname,lastname FROM hiveremote.`customers` limit 10;`

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/060-querying-the-information-schema.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/060-querying-the-information-schema.md b/_docs/query-data/060-querying-the-information-schema.md
index fddb194..7d18120 100644
--- a/_docs/query-data/060-querying-the-information-schema.md
+++ b/_docs/query-data/060-querying-the-information-schema.md
@@ -107,4 +107,4 @@ of those columns:
     | OrderTotal  | Decimal    |
     +-------------+------------+
 
-In this release, Drill disables the DECIMAL data type, including casting to DECIMAL and reading DECIMAL types from Parquet and Hive. [Enable the DECIMAL data type]({{site.baseurl}}/docs/supported-data-types#enabling-the-decimal-type)) if performance is not an issue.
\ No newline at end of file
+In this release, Drill disables the DECIMAL data type, including casting to DECIMAL and reading DECIMAL types from Parquet and Hive. [Enable the DECIMAL data type]({{site.baseurl}}/docs/supported-data-types#enabling-the-decimal-type) if performance is not an issue.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/070-query-sys-tbl.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/070-query-sys-tbl.md b/_docs/query-data/070-query-sys-tbl.md
index 17041fd..5cab6dc 100644
--- a/_docs/query-data/070-query-sys-tbl.md
+++ b/_docs/query-data/070-query-sys-tbl.md
@@ -13,21 +13,21 @@ system tables that you can query.
 Issue the `SHOW DATABASES` command to view Drill databases.
 
     0: jdbc:drill:zk=10.10.100.113:5181> show databases;
-    +-------------+
-    | SCHEMA_NAME |
-    +-------------+
-    | M7          |
-    | hive.default|
-    | dfs.default |
-    | dfs.root    |
-    | dfs.views   |
-    | dfs.tmp     |
-    | dfs.tpcds   |
-    | sys         |
-    | cp.default  |
-    | hbase       |
+    +--------------------+
+    |      SCHEMA_NAME   |
+    +--------------------+
+    | M7                 |
+    | hive.default       |
+    | dfs.default        |
+    | dfs.root           |
+    | dfs.views          |
+    | dfs.tmp            |
+    | dfs.tpcds          |
+    | sys                |
+    | cp.default         |
+    | hbase              |
     | INFORMATION_SCHEMA |
-    +-------------+
+    +--------------------+
     11 rows selected (0.162 seconds)
 
 Drill returns `sys` in the database results.
@@ -67,13 +67,13 @@ Query the drillbits, version, and options tables in the sys database.
 ###Query the drillbits table.
 
     0: jdbc:drill:zk=10.10.100.113:5181> select * from drillbits;
-    +------------------+------------+--------------+------------+---------+
-    |   host            | user_port | control_port | data_port  |  current|
+    +-------------------+------------+--------------+------------+---------+
+    |   host            |  user_port | control_port | data_port  |  current|
     +-------------------+------------+--------------+------------+--------+
-    | qa-node115.qa.lab | 31010     | 31011        | 31012      | true    |
-    | qa-node114.qa.lab | 31010     | 31011        | 31012      | false   |
-    | qa-node116.qa.lab | 31010     | 31011        | 31012      | false   |
-    +------------+------------+--------------+------------+---------------+
+    | qa-node115.qa.lab | 31010      | 31011        | 31012      | true    |
+    | qa-node114.qa.lab | 31010      | 31011        | 31012      | false   |
+    | qa-node116.qa.lab | 31010      | 31011        | 31012      | false   |
+    +-------------------+------------+--------------+------------+---------+
     3 rows selected (0.146 seconds)
 
   * host   
@@ -94,12 +94,12 @@ query. This Drillbit is the Foreman for the current session.
 ### Query the version table.
 
     0: jdbc:drill:zk=10.10.100.113:5181> select * from version;
-    +------------+----------------+-------------+-------------+------------+
-    | commit_id  | commit_message | commit_time | build_email | build_time |
-    +------------+----------------+-------------+-------------+------------+
-    | 108d29fce3d8465d619d45db5f6f433ca3d97619 | DRILL-1635: Additional fix for validation exceptions. | 14.11.2014 @ 02:32:47 UTC | Unknown    | 14.11.2014 @ 03:56:07 UTC |
-    +------------+----------------+-------------+-------------+------------+
-    1 row selected (0.144 seconds)
+    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
+    |                 commit_id                 |                           commit_message                           |        commit_time         | build_email  |         build_time         |
+    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
+    | d8b19759657698581cc0d01d7038797952888123  | DRILL-3100: TestImpersonationDisabledWithMiniDFS fails on Windows  | 15.05.2015 @ 05:18:03 UTC  | Unknown      | 15.05.2015 @ 06:52:32 UTC  |
+    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
+    1 row selected (0.099 seconds)
   * commit_id  
 The github id of the release you are running. For example, <https://github.com
 /apache/drill/commit/e3ab2c1760ad34bda80141e2c3108f7eda7c9104>
@@ -120,21 +120,22 @@ Drill provides system, session, and boot options that you can query.
 The following example shows a query on the system options:
 
     0: jdbc:drill:zk=10.10.100.113:5181> select * from options where type='SYSTEM' limit 10;
-    +------------+------------+------------+------------+------------+------------+------------+
-    |    name   |   kind    |   type    |  num_val   | string_val |  bool_val  | float_val  |
-    +------------+------------+------------+------------+------------+------------+------------+
-    | exec.max_hash_table_size | LONG       | SYSTEM    | 1073741824 | null     | null      | null      |
-    | planner.memory.max_query_memory_per_node | LONG       | SYSTEM    | 2048       | null     | null      | null      |
-    | planner.join.row_count_estimate_factor | DOUBLE   | SYSTEM    | null      | null      | null      | 1.0       |
-    | planner.affinity_factor | DOUBLE  | SYSTEM    | null      | null      | null       | 1.2      |
-    | exec.errors.verbose | BOOLEAN | SYSTEM    | null      | null      | false      | null     |
-    | planner.disable_exchanges | BOOLEAN   | SYSTEM    | null      | null      | false      | null     |
-    | exec.java_compiler_debug | BOOLEAN    | SYSTEM    | null      | null      | true      | null      |
-    | exec.min_hash_table_size | LONG       | SYSTEM    | 65536     | null      | null      | null       |
-    | exec.java_compiler_janino_maxsize | LONG       | SYSTEM   | 262144    | null      | null      | null      |
-    | planner.enable_mergejoin | BOOLEAN    | SYSTEM    | null      | null      | true      | null       |
-    +------------+------------+------------+------------+------------+------------+------------+
-    10 rows selected (0.334 seconds)  
+    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
+    |                      name                       |   kind   |  type   |  status  |   num_val   | string_val  | bool_val  | float_val  |
+    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
+    | drill.exec.functions.cast_empty_string_to_null  | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
+    | drill.exec.storage.file.partition.column.label  | STRING   | SYSTEM  | DEFAULT  | null        | dir         | null      | null       |
+    | exec.errors.verbose                             | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
+    | exec.java_compiler                              | STRING   | SYSTEM  | DEFAULT  | null        | DEFAULT     | null      | null       |
+    | exec.java_compiler_debug                        | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | true      | null       |
+    | exec.java_compiler_janino_maxsize               | LONG     | SYSTEM  | DEFAULT  | 262144      | null        | null      | null       |
+    | exec.max_hash_table_size                        | LONG     | SYSTEM  | DEFAULT  | 1073741824  | null        | null      | null       |
+    | exec.min_hash_table_size                        | LONG     | SYSTEM  | DEFAULT  | 65536       | null        | null      | null       |
+    | exec.queue.enable                               | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
+    | exec.queue.large                                | LONG     | SYSTEM  | DEFAULT  | 10          | null        | null      | null       |
+    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
+    10 rows selected (0.216 seconds)
+
   * name  
 The name of the option.
   * kind  
@@ -151,9 +152,7 @@ The default value, which is true or false; otherwise, null.
 The default value, which is of the double, float, or long double data type;
 otherwise, null.
 
-For information about how to configure Drill system and session options, see[
-Planning and Execution Options]({{ site.baseurl }}/docs/planning-and-execution-options).
+For information about how to configure Drill system and session options, see [Planning and Execution Options]({{ site.baseurl }}/docs/planning-and-execution-options).
 
-For information about how to configure Drill start-up options, see[ Start-Up
-Options]({{ site.baseurl }}/docs/start-up-options).
+For information about how to configure Drill start-up options, see [Start-Up Options]({{ site.baseurl }}/docs/start-up-options).
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/query-a-file-system/010-querying-json-files.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/query-a-file-system/010-querying-json-files.md b/_docs/query-data/query-a-file-system/010-querying-json-files.md
index 219f2b1..51f499d 100644
--- a/_docs/query-data/query-a-file-system/010-querying-json-files.md
+++ b/_docs/query-data/query-a-file-system/010-querying-json-files.md
@@ -9,33 +9,16 @@ data. Use SQL syntax to query the sample `JSON` file.
 To view the data in the `employee.json` file, submit the following SQL query
 to Drill:
 
-         0: jdbc:drill:zk=local> SELECT * FROM cp.`employee.json`;
+         0: jdbc:drill:zk=local> SELECT * FROM cp.`employee.json` LIMIT 5;
 
 The query returns the following results:
 
-**Example of partial output**
+    +--------------+----------------------------+---------------------+---------------+--------------+----------------------------+-----------+----------------+-------------+------------------------+----------+----------------+----------------------+-----------------+---------+-----------------------+
+    | employee_id  |         full_name          |     first_name      |   last_name   | position_id  |       position_title       | store_id  | department_id  | birth_date  |       hire_date        |  salary  | supervisor_id  |   education_level    | marital_status  | gender  |    management_role    |
+    +--------------+----------------------------+---------------------+---------------+--------------+----------------------------+-----------+----------------+-------------+------------------------+----------+----------------+----------------------+-----------------+---------+-----------------------+
+    | 1            | Sheri Nowmer               | Sheri               | Nowmer        | 1            | President                  | 0         | 1              | 1961-08-26  | 1994-12-01 00:00:00.0  | 80000.0  | 0              | Graduate Degree      | S               | F       | Senior Management     |
+    | 2            | Derrick Whelply            | Derrick             | Whelply       | 2            | VP Country Manager         | 0         | 1              | 1915-07-03  | 1994-12-01 00:00:00.0  | 40000.0  | 1              | Graduate Degree      | M               | M       | Senior Management     |
+    | 4            | Michael Spence             | Michael             | Spence        | 2            | VP Country Manager         | 0         | 1              | 1969-06-20  | 1998-01-01 00:00:00.0  | 40000.0  | 1              | Graduate Degree      | S               | M       | Senior Management     |
+    | 5            | Maya Gutierrez             | Maya                | Gutierrez     | 2            | VP Country Manager         | 0         | 1              | 1951-05-10  | 1998-01-01 00:00:00.0  | 35000.0  | 1              | Bachelors Degree     | M               | F       | Senior Management     |
 
-    +-------------+------------+------------+------------+-------------+-----------+
-    | employee_id | full_name  | first_name | last_name  | position_id | position_ |
-    +-------------+------------+------------+------------+-------------+-----------+
-    | 1101        | Steve Eurich | Steve      | Eurich     | 16          | Store T |
-    | 1102        | Mary Pierson | Mary       | Pierson    | 16          | Store T |
-    | 1103        | Leo Jones  | Leo        | Jones      | 16          | Store Tem |
-    | 1104        | Nancy Beatty | Nancy      | Beatty     | 16          | Store T |
-    | 1105        | Clara McNight | Clara      | McNight    | 16          | Store  |
-    | 1106        | Marcella Isaacs | Marcella   | Isaacs     | 17          | Stor |
-    | 1107        | Charlotte Yonce | Charlotte  | Yonce      | 17          | Stor |
-    | 1108        | Benjamin Foster | Benjamin   | Foster     | 17          | Stor |
-    | 1109        | John Reed  | John       | Reed       | 17          | Store Per |
-    | 1110        | Lynn Kwiatkowski | Lynn       | Kwiatkowski | 17          | St |
-    | 1111        | Donald Vann | Donald     | Vann       | 17          | Store Pe |
-    | 1112        | William Smith | William    | Smith      | 17          | Store  |
-    | 1113        | Amy Hensley | Amy        | Hensley    | 17          | Store Pe |
-    | 1114        | Judy Owens | Judy       | Owens      | 17          | Store Per |
-    | 1115        | Frederick Castillo | Frederick  | Castillo   | 17          | S |
-    | 1116        | Phil Munoz | Phil       | Munoz      | 17          | Store Per |
-    | 1117        | Lori Lightfoot | Lori       | Lightfoot  | 17          | Store |
-    ...
-    +-------------+------------+------------+------------+-------------+-----------+
-    1,155 rows selected (0.762 seconds)
     0: jdbc:drill:zk=local>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/query-a-file-system/020-querying-parquet-files.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/query-a-file-system/020-querying-parquet-files.md b/_docs/query-data/query-a-file-system/020-querying-parquet-files.md
index b93a914..3731f65 100644
--- a/_docs/query-data/query-a-file-system/020-querying-parquet-files.md
+++ b/_docs/query-data/query-a-file-system/020-querying-parquet-files.md
@@ -8,42 +8,37 @@ that you can query. Use SQL syntax to query the `region.parquet` and
 
 {% include startnote.html %}Your Drill installation location may differ from the examples used here.{% include endnote.html %} 
 
-The examples assume that Drill was installed in embedded mode on your machine following the [Drill in 10 Minutes ]({{ site.baseurl }}/docs/drill-in-10-minutes) tutorial. If you installed Drill in distributed mode, or your `sample-data` directory differs from the location used in the examples, make sure to change the `sample-data` directory to the correct location before you run the queries.
+The examples assume that Drill was [installed in embedded mode]({{ site.baseurl }}/docs/installing-drill-in-embedded-mode). If you installed Drill in distributed mode, or your `sample-data` directory differs from the location used in the examples. Change the `sample-data` directory to the correct location before you run the queries.
 
 ## Region File
 
-If you followed the Apache Drill in 10 Minutes instructions to install Drill
-in embedded mode, the path to the parquet file varies between operating
-systems.
-
 To view the data in the `region.parquet` file, issue the query appropriate for
 your operating system:
 
   * Linux  
     
-        SELECT * FROM dfs.`/opt/drill/apache-drill-0.4.0-incubating/sample-data/region.parquet`;
+        SELECT * FROM dfs.`/opt/drill/apache-drill-1.0.0/sample-data/region.parquet`;
 
   * Mac OS X  
         
-        SELECT * FROM dfs.`/Users/max/drill/apache-drill-0.4.0-incubating/sample-data/region.parquet`;
+        SELECT * FROM dfs.`/Users/max/drill/apache-drill-1.0.0/sample-data/region.parquet`;
 
   * Windows  
     
-        SELECT * FROM dfs.`C:\drill\apache-drill-0.4.0-incubating\sample-data\region.parquet`;
+        SELECT * FROM dfs.`C:\drill\apache-drill-1.0.0\sample-data\region.parquet`;
 
 The query returns the following results:
 
-    +------------+------------+
-    |   EXPR$0   |   EXPR$1   |
-    +------------+------------+
-    | AFRICA     | lar deposits. blithely final packages cajole. regular waters ar |
-    | AMERICA    | hs use ironic, even requests. s |
-    | ASIA       | ges. thinly even pinto beans ca |
-    | EUROPE     | ly final courts cajole furiously final excuse |
-    | MIDDLE EAST | uickly special accounts cajole carefully blithely close reques |
-    +------------+------------+
-    5 rows selected (0.165 seconds)
-    0: jdbc:drill:zk=local>
+    +--------------+--------------+-----------------------+
+    | R_REGIONKEY  |    R_NAME    |       R_COMMENT       |
+    +--------------+--------------+-----------------------+
+    | 0            | AFRICA       | lar deposits. blithe  |
+    | 1            | AMERICA      | hs use ironic, even   |
+    | 2            | ASIA         | ges. thinly even pin  |
+    | 3            | EUROPE       | ly final courts cajo  |
+    | 4            | MIDDLE EAST  | uickly special accou  |
+    +--------------+--------------+-----------------------+
+    5 rows selected (0.272 seconds)
 
 ## Nation File
 
@@ -56,46 +51,45 @@ your operating system:
 
   * Linux  
   
-        SELECT * FROM dfs.`/opt/drill/apache-drill-0.4.0-incubating/sample-data/nation.parquet`;
+        SELECT * FROM dfs.`/opt/drill/apache-drill-1.0.0/sample-data/nation.parquet`;
 
   * Mac OS X  
 
-        SELECT * FROM dfs.`/Users/max/drill/apache-drill-0.4.0-incubating/sample-data/nation.parquet`;
+        SELECT * FROM dfs.`/Users/max/drill/apache-drill-1.0.0/sample-data/nation.parquet`;
 
   * Windows  
 
-        SELECT * FROM dfs.`C:\drill\apache-drill-0.4.0-incubating\sample-data\nation.parquet`;
+        SELECT * FROM dfs.`C:\drill\apache-drill-1.0.0\sample-data\nation.parquet`;
 
 The query returns the following results:
 
-    +------------+------------+------------+------------+
-    |   EXPR$0   |   EXPR$1   |   EXPR$2   |   EXPR$3   |
-    +------------+------------+------------+------------+
-    | 0          | 0          | ALGERIA    |  haggle. carefully final deposits det |
-    | 1          | 1          | ARGENTINA  | al foxes promise slyly according to t |
-    | 2          | 1          | BRAZIL     | y alongside of the pending deposits.  |
-    | 3          | 1          | CANADA     | eas hang ironic, silent packages. sly |
-    | 4          | 4          | EGYPT      | y above the carefully unusual theodol |
-    | 5          | 0          | ETHIOPIA   | ven packages wake quickly. regu |
-    | 6          | 3          | FRANCE     | refully final requests. regular, iron |
-    | 7          | 3          | GERMANY    | l platelets. regular accounts x-ray:  |
-    | 8          | 2          | INDIA      | ss excuses cajole slyly across the pa |
-    | 9          | 2          | INDONESIA  |  slyly express asymptotes. regular de |
-    | 10         | 4          | IRAN       | efully alongside of the slyly final d |
-    | 11         | 4          | IRAQ       | nic deposits boost atop the quickly f |
-    | 12         | 2          | JAPAN      | ously. final, express gifts cajole a |
-    | 13         | 4          | JORDAN     | ic deposits are blithely about the ca |
-    | 14         | 0          | KENYA      |  pending excuses haggle furiously dep |
-    | 15         | 0          | MOROCCO    | rns. blithely bold courts among the c |
-    | 16         | 0          | MOZAMBIQUE | s. ironic, unusual asymptotes wake bl |
-    | 17         | 1          | PERU       | platelets. blithely pending dependenc |
-    | 18         | 2          | CHINA      | c dependencies. furiously express not |
-    | 19         | 3          | ROMANIA    | ular asymptotes are about the furious |
-    | 20         | 4          | SAUDI ARABIA | ts. silent requests haggle. closely |
-    | 21         | 2          | VIETNAM    | hely enticingly express accounts. eve |
-    | 22         | 3          | RUSSIA     |  requests against the platelets use n |
-    | 23         | 3          | UNITED KINGDOM | eans boost carefully special requ |
-    | 24         | 1          | UNITED STATES | y final packages. slow foxes cajol |
-    +------------+------------+------------+------------+
-    25 rows selected (2.401 seconds)
-    0: jdbc:drill:zk=local>
\ No newline at end of file
+    +--------------+-----------------+--------------+-----------------------+
+    | N_NATIONKEY  |     N_NAME      | N_REGIONKEY  |       N_COMMENT       |
+    +--------------+-----------------+--------------+-----------------------+
+    | 0            | ALGERIA         | 0            |  haggle. carefully f  |
+    | 1            | ARGENTINA       | 1            | al foxes promise sly  |
+    | 2            | BRAZIL          | 1            | y alongside of the p  |
+    | 3            | CANADA          | 1            | eas hang ironic, sil  |
+    | 4            | EGYPT           | 4            | y above the carefull  |
+    | 5            | ETHIOPIA        | 0            | ven packages wake qu  |
+    | 6            | FRANCE          | 3            | refully final reques  |
+    | 7            | GERMANY         | 3            | l platelets. regular  |
+    | 8            | INDIA           | 2            | ss excuses cajole sl  |
+    | 9            | INDONESIA       | 2            |  slyly express asymp  |
+    | 10           | IRAN            | 4            | efully alongside of   |
+    | 11           | IRAQ            | 4            | nic deposits boost a  |
+    | 12           | JAPAN           | 2            | ously. final, expres  |
+    | 13           | JORDAN          | 4            | ic deposits are blit  |
+    | 14           | KENYA           | 0            |  pending excuses hag  |
+    | 15           | MOROCCO         | 0            | rns. blithely bold c  |
+    | 16           | MOZAMBIQUE      | 0            | s. ironic, unusual a  |
+    | 17           | PERU            | 1            | platelets. blithely   |
+    | 18           | CHINA           | 2            | c dependencies. furi  |
+    | 19           | ROMANIA         | 3            | ular asymptotes are   |
+    | 20           | SAUDI ARABIA    | 4            | ts. silent requests   |
+    | 21           | VIETNAM         | 2            | hely enticingly expr  |
+    | 22           | RUSSIA          | 3            |  requests against th  |
+    | 23           | UNITED KINGDOM  | 3            | eans boost carefully  |
+    | 24           | UNITED STATES   | 1            | y final packages. sl  |
+    +--------------+-----------------+--------------+-----------------------+
+    25 rows selected (0.102 seconds)

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/query-a-file-system/030-querying-plain-text-files.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/query-a-file-system/030-querying-plain-text-files.md b/_docs/query-data/query-a-file-system/030-querying-plain-text-files.md
index 1fd9d84..8924835 100644
--- a/_docs/query-data/query-a-file-system/030-querying-plain-text-files.md
+++ b/_docs/query-data/query-a-file-system/030-querying-plain-text-files.md
@@ -44,19 +44,19 @@ records:
 Drill recognizes each row as an array of values and returns one column for
 each row.
 
-        0: jdbc:drill:zk=local> select * from dfs.`/Users/brumsby/drill/plays.csv`;
+    0: jdbc:drill:zk=local> select * from dfs.`/Users/brumsby/drill/plays.csv`;
  
-    +------------+
-    |  columns   |
-    +------------+
-    | ["1599","As You Like It"] |
-    | ["1601","Twelfth Night"] |
-    | ["1594","Comedy of Errors"] |
-    | ["1595","Romeo and Juliet"] |
+    +-----------------------------------+
+    |              columns              |
+    +-----------------------------------+
+    | ["1599","As You Like It"]         |
+    | ["1601","Twelfth Night"]          |
+    | ["1594","Comedy of Errors"]       |
+    | ["1595","Romeo and Juliet"]       |
     | ["1596","The Merchant of Venice"] |
-    | ["1610","The Tempest"] |
-    | ["1599","Hamlet"] |
-    +------------+
+    | ["1610","The Tempest"]            |
+    | ["1599","Hamlet"]                 |
+    +-----------------------------------+
     7 rows selected (0.089 seconds)
 
 ## Columns[n] Syntax
@@ -67,17 +67,17 @@ based index, so the first column is column `0`.)
 
     0: jdbc:drill:zk=local> select columns[0], columns[1] from dfs.`/Users/brumsby/drill/plays.csv`;
  
-    +------------+------------+
-    |   EXPR$0   |   EXPR$1   |
-    +------------+------------+
-    | 1599       | As You Like It |
-    | 1601       | Twelfth Night |
-    | 1594       | Comedy of Errors |
-    | 1595       | Romeo and Juliet |
+    +------------+------------------------+
+    |   EXPR$0   |         EXPR$1         |
+    +------------+------------------------+
+    | 1599       | As You Like It         |
+    | 1601       | Twelfth Night          |
+    | 1594       | Comedy of Errors       |
+    | 1595       | Romeo and Juliet       |
     | 1596       | The Merchant of Venice |
-    | 1610       | The Tempest |
-    | 1599       | Hamlet     |
-    +------------+------------+
+    | 1610       | The Tempest            |
+    | 1599       | Hamlet                 |
+    +------------+------------------------+
     7 rows selected (0.137 seconds)
 
 You can use aliases to return meaningful column names. Note that `YEAR` is a
@@ -86,17 +86,17 @@ reserved word, so the `Year` alias must be enclosed by back ticks.
     0: jdbc:drill:zk=local> select columns[0] as `Year`, columns[1] as Play 
     from dfs.`/Users/brumsby/drill/plays.csv`;
  
-    +------------+------------+
-    |    Year    |    Play    |
-    +------------+------------+
-    | 1599       | As You Like It |
-    | 1601       | Twelfth Night |
-    | 1594       | Comedy of Errors |
-    | 1595       | Romeo and Juliet |
+    +------------+------------------------+
+    |    Year    |          Play          |
+    +------------+------------------------+
+    | 1599       | As You Like It         |
+    | 1601       | Twelfth Night          |
+    | 1594       | Comedy of Errors       |
+    | 1595       | Romeo and Juliet       |
     | 1596       | The Merchant of Venice |
-    | 1610       | The Tempest |
-    | 1599       | Hamlet     |
-    +------------+------------+
+    | 1610       | The Tempest            |
+    | 1599       | Hamlet                 |
+    +------------+------------------------+
     7 rows selected (0.113 seconds)
 
 You cannot refer to the aliases in subsequent clauses of the query. Use the
@@ -106,12 +106,12 @@ example:
     0: jdbc:drill:zk=local> select columns[0] as `Year`, columns[1] as Play 
     from dfs.`/Users/brumsby/drill/plays.csv` where columns[0]>1599;
  
-    +------------+------------+
-    |    Year    |    Play    |
-    +------------+------------+
+    +------------+---------------+
+    |    Year    |      Play     |
+    +------------+---------------+
     | 1601       | Twelfth Night |
-    | 1610       | The Tempest |
-    +------------+------------+
+    | 1610       | The Tempest   |
+    +------------+---------------+
     2 rows selected (0.201 seconds)
 
 Note that the restriction with the use of aliases applies to queries against

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/query-a-file-system/040-querying-directories.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/query-a-file-system/040-querying-directories.md b/_docs/query-data/query-a-file-system/040-querying-directories.md
index ef2def9..1a55b75 100644
--- a/_docs/query-data/query-a-file-system/040-querying-directories.md
+++ b/_docs/query-data/query-a-file-system/040-querying-directories.md
@@ -16,20 +16,20 @@ the "union" of the two files, ordered by the first column:
     0: jdbc:drill:zk=local> select columns[0] as `Year`, columns[1] as Play 
     from dfs.`/Users/brumsby/drill/testdata` order by 1;
  
-    +------------+------------+
-    |    Year    |    Play    |
-    +------------+------------+
-    | 1594       | Comedy of Errors |
-    | 1595       | Romeo and Juliet |
+    +------------+------------------------+
+    |    Year    |          Play          |
+    +------------+------------------------+
+    | 1594       | Comedy of Errors       |
+    | 1595       | Romeo and Juliet       |
     | 1596       | The Merchant of Venice |
-    | 1599       | As You Like It |
-    | 1599       | Hamlet     |
-    | 1601       | Twelfth Night |
-    | 1606       | Macbeth    |
-    | 1606       | King Lear  |
-    | 1609       | The Winter's Tale |
-    | 1610       | The Tempest |
-    +------------+------------+
+    | 1599       | As You Like It         |
+    | 1599       | Hamlet                 |
+    | 1601       | Twelfth Night          |
+    | 1606       | Macbeth                |
+    | 1606       | King Lear              |
+    | 1609       | The Winter's Tale      |
+    | 1610       | The Tempest            |
+    +------------+------------------------+
     10 rows selected (0.296 seconds)
 
 You can drill down further and automatically query subdirectories as well. For
@@ -65,11 +65,11 @@ files inside the subdirectory named `2013`. The variable `dir0` refers to the
 first level down from logs, `dir1` to the next level, and so on.
 
     0: jdbc:drill:> use bob.logdata;
-    +------------+------------+
-    |     ok     |  summary   |
-    +------------+------------+
+    +------------+-----------------------------------------+
+    |     ok     |                 summary                 |
+    +------------+-----------------------------------------+
     | true       | Default schema changed to 'bob.logdata' |
-    +------------+------------+
+    +------------+-----------------------------------------+
     1 row selected (0.305 seconds)
  
     0: jdbc:drill:> select * from logs where dir0='2013' limit 10;

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/query-data/querying-complex-data/005-querying-complex-data-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/querying-complex-data/005-querying-complex-data-introduction.md b/_docs/query-data/querying-complex-data/005-querying-complex-data-introduction.md
index a6a8c84..099e047 100644
--- a/_docs/query-data/querying-complex-data/005-querying-complex-data-introduction.md
+++ b/_docs/query-data/querying-complex-data/005-querying-complex-data-introduction.md
@@ -5,12 +5,12 @@ parent: "Querying Complex Data"
 Apache Drill queries do not require prior knowledge of the actual data you are
 trying to access, regardless of its source system or its schema and data
 types. The sweet spot for Apache Drill is a SQL query workload against
-"complex data": data made up of various types of records and fields, rather
+*complex data*: data made up of various types of records and fields, rather
 than data in a recognizable relational form (discrete rows and columns). Drill
 is capable of discovering the form of the data when you submit the query.
 Nested data formats such as JSON (JavaScript Object Notation) files and
 Parquet files are not only _accessible_: Drill provides special operators and
-functions that you can use to _drill down _into these files and ask
+functions that you can use to _drill down_ into these files and ask
 interesting analytic questions.
 
 These operators and functions include:

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/090-sql-extensions.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/090-sql-extensions.md b/_docs/sql-reference/090-sql-extensions.md
index a30961c..ed97611 100644
--- a/_docs/sql-reference/090-sql-extensions.md
+++ b/_docs/sql-reference/090-sql-extensions.md
@@ -4,7 +4,7 @@ parent: "SQL Reference"
 ---
 Drill extends SQL to work with Hadoop-scale data and to explore smaller-scale data in ways not possible with SQL. Using intuitive SQL extensions you work with self-describing data and complex data types. Extensions to SQL include capabilities for exploring self-describing data, such as files and HBase, directly in the native format.
 
-Drill provides language support for pointing to [storage plugin]() interfaces that Drill uses to interact with data sources. Use the name of a storage plugin to specify a file system *database* as a prefix in queries when you refer to objects across databases. Query files, including compressed .gz files, and [directories]({{ site.baseurl }}/docs/querying-directories), as you would query an SQL table. You can query [multiple files in a directory]({{ site.baseurl }}/docs/querying-directories).
+Drill provides language support for pointing to [storage plugin]({{site.baseurl}}/docs/connect-a-data-source-introduction) interfaces that Drill uses to interact with data sources. Use the name of a storage plugin to specify a file system *database* as a prefix in queries when you refer to objects across databases. Query files, including compressed .gz files, and [directories]({{ site.baseurl }}/docs/querying-directories), as you would query an SQL table. You can query multiple files in a directory.
 
 Drill extends the SELECT statement for reading complex, multi-structured data. The extended CREATE TABLE AS SELECT provides the capability to write data of complex/multi-structured data types. Drill extends the [lexical rules](http://drill.apache.org/docs/lexical-structure) for working with files and directories, such as using back ticks for including file names, directory names, and reserved words in queries. Drill syntax supports using the file system as a persistent store for query profiles and diagnostic information.
 
@@ -13,14 +13,14 @@ Drill extends the SELECT statement for reading complex, multi-structured data. T
 Drill supports Hive and HBase as a plug-and-play data source. Drill can read tables created in Hive that use [data types compatible]({{ site.baseurl }}/docs/hive-to-drill-data-type-mapping) with Drill.  You can query Hive tables without modifications. You can query self-describing data without requiring metadata definitions in the Hive metastore. Primitives, such as JOIN, support columnar operation. 
 
 ## Extensions for JSON-related Data Sources
-For reading JSON numbers as DOUBLE or reading all JSON data as VARCHAR, use a [store.json option](http://drill.apache.org/docs/handling-different-data-types/#reading-numbers-of-different-types-from-json). Drill extends SQL to provide access to repeating values in arrays and arrays within arrays (array indexes). You can use these extensions to reach into deeply nested data. Drill extensions use standard JavaScript notation for referencing data elements in a hierarchy, as shown in ["Analyzing JSON."]({{ site.baseurl }}/docs/json-data-model#analyzing-json)
+For reading JSON numbers as DOUBLE or reading all JSON data as VARCHAR, use a [store.json option]({{site.baseurl}}/docs/handling-different-data-types/#reading-numbers-of-different-types-from-json). Drill extends SQL to provide access to repeating values in arrays and arrays within arrays (array indexes). You can use these extensions to reach into deeply nested data. Drill extensions use standard JavaScript notation for referencing data elements in a hierarchy, as shown in ["Analyzing JSON."]({{ site.baseurl }}/docs/json-data-model#analyzing-json)
 
 ## Extensions for Parquet Data Sources
 SQL does not support all Parquet data types, so Drill infers data types in many instances. Users [cast] ({{ site.baseurl }}/docs/sql-functions) data types to ensure getting a particular data type. Drill offers more liberal casting capabilities than SQL for Parquet conversions if the Parquet data is of a logical type. You can use the default dfs storage plugin installed with Drill for reading and writing Parquet files as shown in the section, [“Parquet Format.”]({{ site.baseurl }}/docs/parquet-format)
 
 
 ## Extensions for Text Data Sources
-Drill handles plain text files and directories like standard SQL tables and can infer knowledge about the schema of the data. Drill extends SQL to handle structured file types, such as comma separated values (CSV) files. An extension of the SELECT statement provides COLUMNS[n] syntax for accessing CSV rows in a readable format, as shown in ["COLUMNS[n] Syntax."]({{ site.baseurl }}/docs/querying-plain-text-files)
+Drill handles plain text files and directories like standard SQL tables and can infer knowledge about the schema of the data. Drill extends SQL to handle structured file types, such as comma separated values (CSV) files. An extension of the SELECT statement provides COLUMNS[n] syntax for accessing CSV rows in a readable format, as shown in ["COLUMNS[n] Syntax."]({{ site.baseurl }}/docs/querying-plain-text-files/#columns[n]-syntax)
 
 ## SQL Function Extensions
 Drill provides the following functions for analyzing nested data.
@@ -34,7 +34,7 @@ Drill provides the following functions for analyzing nested data.
 
 ## Other Extensions
 
-The [`sys` database system tables]() provide port, version, and option information.  For example, Drill connects to a random node. You query the sys table to know where you are connected:
+The [`sys` tables](/docs/querying-system-tables/) provide port, version, and option information.  For example, Drill connects to a random node. You query the sys table to know where you are connected:
 
     SELECT host FROM sys.drillbits WHERE `current` = true;
     +------------+

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/data-types/010-supported-data-types.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/data-types/010-supported-data-types.md b/_docs/sql-reference/data-types/010-supported-data-types.md
index 5d7fa86..7ffa85e 100644
--- a/_docs/sql-reference/data-types/010-supported-data-types.md
+++ b/_docs/sql-reference/data-types/010-supported-data-types.md
@@ -32,12 +32,12 @@ To enable the DECIMAL type, set the `planner.enable_decimal_data_type` option to
 
      ALTER SYSTEM SET `planner.enable_decimal_data_type` = true;
 
-     +------------+------------+
-     |     ok     |  summary   |
-     +------------+------------+
-     | true       | planner.enable_decimal_data_type updated. |
-     +------------+------------+
-     1 row selected (1.191 seconds)
+    +-------+--------------------------------------------+
+    |  ok   |                  summary                   |
+    +-------+--------------------------------------------+
+    | true  | planner.enable_decimal_data_type updated.  |
+    +-------+--------------------------------------------+
+    1 row selected (0.08 seconds)
 
 ## Casting and Converting Data Types
 
@@ -94,13 +94,13 @@ In a textual file, such as CSV, Drill interprets every field as a VARCHAR, as pr
   Casts data from one data type to another.
 * [CONVERT_TO and CONVERT_FROM]({{ site.baseurl }}/docs/data-type-conversion#convert_to-and-convert_from)  
   Converts data, including binary data, from one data type to another.
-* [TO_CHAR]()  
+* [TO_CHAR]({{ site.baseurl }}/docs/data-type-conversion/#to_char)  
   Converts a TIMESTAMP, INTERVALDAY/INTERVALYEAR, INTEGER, DOUBLE, or DECIMAL to a string.
-* [TO_DATE]()  
+* [TO_DATE]({{ site.baseurl }}/docs/data-type-conversion/#to_date)  
   Converts a string to DATE.
-* [TO_NUMBER]()  
+* [TO_NUMBER]({{ site.baseurl }}/docs/data-type-conversion/#to_number)  
   Converts a string to a DECIMAL.
-* [TO_TIMESTAMP]()  
+* [TO_TIMESTAMP]({{ site.baseurl }}/docs/data-type-conversion/#to_timestamp)  
   Converts a string to TIMESTAMP.
 
 If the SELECT statement includes a WHERE clause that compares a column of an unknown data type, cast both the value of the column and the comparison value in the WHERE clause.

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/nested-data-functions/010-flatten.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/nested-data-functions/010-flatten.md b/_docs/sql-reference/nested-data-functions/010-flatten.md
index a128640..a0e2573 100644
--- a/_docs/sql-reference/nested-data-functions/010-flatten.md
+++ b/_docs/sql-reference/nested-data-functions/010-flatten.md
@@ -52,9 +52,9 @@ row contains an array of four categories:
     0: jdbc:drill:zk=local> select distinct name, hours, categories 
     from dfs.yelp.`yelp_academic_dataset_business.json` 
     where name ='zpizza';
-    +------------+------------+------------+
-    |    name    |   hours    | categories |
-    +------------+------------+------------+
+    +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------+
+    |    name    |   hours                                                                                                                                                                                                                                                                                                         | categories                                    |
+    +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------+
     | zpizza     | {"Tuesday":{"close":"22:00","open":"10:00"},"Friday":{"close":"23:00","open":"10:00"},"Monday":{"close":"22:00","open":"10:00"},"Wednesday":{"close":"22:00","open":"10:00"},"Thursday":{"close":"22:00","open":"10:00"},"Sunday":{"close":"22:00","open":"10:00"},"Saturday":{"close":"23:00","open":"10:00"}} | ["Gluten-Free","Pizza","Vegan","Restaurants"] |
 
 The FLATTEN function can operate on this single row and return multiple rows,
@@ -98,5 +98,5 @@ the categories array, then run a COUNT function on the flattened result:
     +---------------|------------+
 
 A common use case for FLATTEN is its use in conjunction with the
-[KVGEN]({{ site.baseurl }}/docs/flatten-function) function as shown in the section, ["JSON Data Model"]({{ site.baseurl }}/docs/json-data-model/).
+KVGEN function as shown in the section, ["JSON Data Model"]({{ site.baseurl }}/docs/json-data-model/).
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/nested-data-functions/020-kvgen.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/nested-data-functions/020-kvgen.md b/_docs/sql-reference/nested-data-functions/020-kvgen.md
index 1e01b16..42511e8 100644
--- a/_docs/sql-reference/nested-data-functions/020-kvgen.md
+++ b/_docs/sql-reference/nested-data-functions/020-kvgen.md
@@ -73,7 +73,7 @@ a map with a wide set of columns into an array of key-value pairs.
 
 In turn, you can write analytic queries that return a subset of the generated
 keys or constrain the keys in some way. For example, you can use the
-[FLATTEN]({{ site.baseurl }}/docs/flatten-function) function to break the
+[FLATTEN]({{ site.baseurl }}/docs/flatten) function to break the
 array down into multiple distinct rows and further query those rows.
 
 For example, assume that a JSON file named `simplemaps.json` contains this data:  
@@ -92,8 +92,7 @@ KVGEN would operate on this data as follows:
 	+------------+
 	2 rows selected (0.201 seconds)
 
-Applying the [FLATTEN]({{ site.baseurl }}/docs/flatten-function) function to
-this data would return:
+Applying the FLATTEN function to this data would return:
 
     {"key": "a", "value": "valA"}
     {"key": "b", "value": "valB"}

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/sql-functions/010-math-and-trig.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/010-math-and-trig.md b/_docs/sql-reference/sql-functions/010-math-and-trig.md
index e653b34..fc06932 100644
--- a/_docs/sql-reference/sql-functions/010-math-and-trig.md
+++ b/_docs/sql-reference/sql-functions/010-math-and-trig.md
@@ -12,7 +12,7 @@ Drill supports the math functions shown in the following table of math functions
 
 \* Not supported in this release.
 
-Exceptions are the LSHIFT and RSHIFT functions, which take all types except FLOAT and DOUBLE types. DEGREES, EXP, RADIANS, and the multiple LOG functions take the input types in this list plus the DECIMAL type:
+Exceptions are the LSHIFT and RSHIFT functions, which take all types except FLOAT and DOUBLE types. DEGREES, EXP, RADIANS, and the multiple LOG functions take the input types in this list plus the DECIMAL type. In this release, Drill disables the DECIMAL data type. To enable the DECIMAL type, set the `planner.enable_decimal_data_type` option to `true`.
 
 ## Table of Math Functions
 
@@ -184,12 +184,12 @@ Get the natural log of 7.5.
 
     SELECT LOG(7.5) FROM sys.version;
 
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2.0149030205422647 |
-    +------------+
-    1 row selected (0.063 seconds)
+    +---------------------+
+    |       EXPR$0        |
+    +---------------------+
+    | 2.0149030205422647  |
+    +---------------------+
+    1 row selected (0.139 seconds)
 
 ## Trig Functions
 


[09/31] drill git commit: sqlline to Drill shell

Posted by ts...@apache.org.
sqlline to Drill shell


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/6089ab01
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/6089ab01
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/6089ab01

Branch: refs/heads/gh-pages
Commit: 6089ab014e2d15d450acfd999beb51ae0d62a6bd
Parents: cd0a0e9
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sun May 17 17:35:26 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sun May 17 17:35:26 2015 -0700

----------------------------------------------------------------------
 _docs/075-getting-query-information.md                    |  4 +---
 .../035-plugin-configuration-introduction.md              | 10 +++++-----
 .../030-starting-drill-on-linux-and-mac-os-x.md           |  2 +-
 .../sql-functions/020-data-type-conversion.md             |  2 +-
 _docs/tutorials/020-drill-in-10-minutes.md                |  4 ++--
 .../tutorials/030-analyzing-the-yelp-academic-dataset.md  |  4 ++--
 _docs/tutorials/050-analyzing-highly-dynamic-datasets.md  |  4 ++--
 .../020-getting-to-know-the-drill-sandbox.md              |  2 +-
 .../030-lesson-1-learn-about-the-data-set.md              | 10 +++++-----
 9 files changed, 20 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/075-getting-query-information.md
----------------------------------------------------------------------
diff --git a/_docs/075-getting-query-information.md b/_docs/075-getting-query-information.md
index 58e0a62..57c1678 100644
--- a/_docs/075-getting-query-information.md
+++ b/_docs/075-getting-query-information.md
@@ -2,9 +2,7 @@
 title: "Getting Query Information"
 parent: "Query Audit Logging"
 ---
-The query log provides audit log functionality for the queries executed by various drillbits in the cluster. To access the query log, go to `sqlline_queries.json` file in the `log` directory of the Drill installation. The log records important information about queries executed on the Drillbit where Drill runs. The log includes query text, start time, end time, user, status, schema, and the query id.
-
-You can query one of the following files, depending on whether you run Drill in embedded or distributed mode, to get audit logging information.:
+The query log provides audit log functionality for the queries executed by various drillbits in the cluster. The log records important information about queries executed on the Drillbit where Drill runs. The log includes query text, start time, end time, user, status, schema, and the query id. You can query one of the following log files, depending on whether you run Drill in embedded or distributed mode, to get audit logging information:
 
 * `sqlline_queries.json` (embedded mode) 
 * `drillbit_queries.json` (distributed mode)

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
index 0cfc206..44e2e11 100644
--- a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
+++ b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
@@ -78,13 +78,13 @@ The following table describes the attributes you configure for storage plugins i
   </tr>
   <tr>
     <td>"formats"</td>
-    <td>"psv"<br>"csv"<br>"tsv"<br>"parquet"<br>"json"<br>"avro"<br>"maprdb"*</td>
+    <td>"psv"<br>"csv"<br>"tsv"<br>"parquet"<br>"json"<br>"avro"<br>"maprdb" *</td>
     <td>yes</td>
     <td>One or more file formats of data Drill can read. Drill can implicitly detect some file formats based on the file extension or the first few bits of data within the file, but you need to configure an option for others.</td>
   </tr>
   <tr>
     <td>"formats" . . . "type"</td>
-    <td>"text"<br>"parquet"<br>"json"<br>"maprdb"</td>
+    <td>"text"<br>"parquet"<br>"json"<br>"maprdb" *</td>
     <td>yes</td>
     <td>The type of the format specified. For example, you can define two formats, csv and psv, as type "Text", but having different delimiters. Drill enables the maprdb plugin if you define the maprdb type.</td>
   </tr>
@@ -102,7 +102,7 @@ The following table describes the attributes you configure for storage plugins i
   </tr>
 </table>
 
-\* Only appears when you install Drill on a cluster using the mapr-drill package.
+\* Pertains only to distributed drill installations using the mapr-drill package.
 
 The configuration of other attributes, such as `size.calculator.enabled` in the hbase plugin and `configProps` in the hive plugin, are implementation-dependent and beyond the scope of this document.
 
@@ -119,10 +119,10 @@ For example, using uppercase letters in the query after defining the storage plu
 
 Drill provides a REST API that you can use to create a storage plugin. Use an HTTP POST and pass two properties:
 
-* name
+* name  
   The plugin name. 
 
-* config
+* config  
   The storage plugin definition as you would enter it in the Web UI.
 
 For example, this command creates a plugin named myplugin for reading files of an unknown type located on the root of the file system:

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md b/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
index 2bbfede..cdbdf20 100644
--- a/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
+++ b/_docs/install/installing-drill-in-embedded-mode/030-starting-drill-on-linux-and-mac-os-x.md
@@ -8,7 +8,7 @@ Start the Drill shell using the `drill-embedded` command. The command uses a jdb
 
         cd apache-drill-1.0.0  
 
-2. Issue the following command to launch SQLLine:
+2. Issue the following command to start the Drill shell:
 
         bin/drill-embedded  
 

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/sql-reference/sql-functions/020-data-type-conversion.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/020-data-type-conversion.md b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
index 3ad20f7..239da85 100644
--- a/_docs/sql-reference/sql-functions/020-data-type-conversion.md
+++ b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
@@ -731,7 +731,7 @@ Currently Drill does not support conversion of a date, time, or timestamp from o
 
         export DRILL_JAVA_OPTS="-Xms1G -Xmx$DRILL_MAX_HEAP -XX:MaxDirectMemorySize=$DRILL_MAX_DIRECT_MEMORY -XX:MaxPermSize=512M -XX:ReservedCodeCacheSize=1G -ea -Duser.timezone=UTC"
 
-3. Restart SQLLine.
+3. Restart the Drill shell.
 
 4. Confirm that Drill is now set to UTC:
 

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/tutorials/020-drill-in-10-minutes.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/020-drill-in-10-minutes.md b/_docs/tutorials/020-drill-in-10-minutes.md
index 98f5758..95db7a9 100755
--- a/_docs/tutorials/020-drill-in-10-minutes.md
+++ b/_docs/tutorials/020-drill-in-10-minutes.md
@@ -15,7 +15,7 @@ You can install Drill in embedded mode on a machine running Linux, Mac OS X, or
 
 This installation procedure includes how to download the Apache Drill archive and extract the contents to a directory on your machine. The Apache Drill archive contains sample JSON and Parquet files that you can query immediately.
 
-After installing Drill, you start  SQLLine. SQLLine is a pure-Java console-based utility for connecting to relational databases and executing SQL commands. SQLLine is used as the shell for Drill. Drill follows the ANSI SQL: 2011 standard with [extensions]({{site.baseurl}}/docs/sql-extensions/) for nested data formats and other capabilities.
+After installing Drill, you start the Drill shell. The Drill shell is a pure-Java console-based utility for connecting to relational databases and executing SQL commands. Drill follows the ANSI SQL:2011 standard with [extensions]({{site.baseurl}}/docs/sql-extensions/) for nested data formats and other capabilities.
 
 ## Embedded Mode Installation Prerequisites
 
@@ -98,7 +98,7 @@ At this point, you can [run queries]({{ site.baseurl }}/docs/drill-in-10-minutes
 
 ## Stopping Drill
 
-Issue the following command when you want to exit SQLLine:
+Issue the following command when you want to exit the Drill shell:
 
     !quit
 

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md b/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
index 7e638b6..3a53efd 100644
--- a/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
+++ b/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
@@ -35,9 +35,9 @@ want to scale your environment.
 
     tar -xvf apache-drill-0.1.0.tar.gz
 
-### Step 3: Launch SQLLine, a JDBC application that ships with Drill
+### Step 3: Start the Drill shell.
 
-    bin/sqlline -u jdbc:drill:zk=local
+    bin/drill-embedded
 
 That’s it! You are now ready explore the data.
 

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md b/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
index ffbf1b3..bb5680b 100644
--- a/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
+++ b/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
@@ -38,9 +38,9 @@ Install Drill locally on your desktop (embedded mode). You don’t need Hadoop.
 
 ----------
 
-Step 2: Launch the SQLLine, a pre-packaged JDBC-based application with Drill
+Step 2: Start the Drill shell.
 
-    bin/sqlline -u jdbc:drill:zk=local  
+    bin/drill-embedded
 
 ----------
 

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
index ee5133f..bf1a136 100644
--- a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
+++ b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
@@ -12,7 +12,7 @@ example:
 
 Using the secure shell instead of the VM interface has some advantages. You can copy/paste commands from the tutorial and avoid mouse control problems.
 
-Drill includes SQLLine, a JDBC utility for connecting to relational databases and executing SQL commands. `SQLLine` is a sql client on the sandbox that starts Drill only in embedded mode. After logging into the sandbox,  use the `SQLLine` command to start SQLLine for executing Drill queries in embedded mode.  
+Drill includes a shell for connecting to relational databases and executing SQL commands. On the sandbox, the Drill shell runs in embedded mode. After logging into the sandbox, use the `sqlline` command to start the Drill shell for executing Drill queries in embedded mode.  
 
     [mapr@maprdemo ~]# sqlline
     sqlline version 1.1.6

http://git-wip-us.apache.org/repos/asf/drill/blob/6089ab01/_docs/tutorials/learn-drill-with-the-mapr-sandbox/030-lesson-1-learn-about-the-data-set.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/030-lesson-1-learn-about-the-data-set.md b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/030-lesson-1-learn-about-the-data-set.md
index a6153d1..9344006 100644
--- a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/030-lesson-1-learn-about-the-data-set.md
+++ b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/030-lesson-1-learn-about-the-data-set.md
@@ -22,17 +22,17 @@ This lesson consists of select * queries on each data source.
 
 ## Before You Begin
 
-### Start SQLLine
+### Start the Drill Shell
 
-If SQLLine is not already started, use a Terminal or Command window to log
+If the Drill shell is not already started, use a Terminal or Command window to log
 into the demo VM as root, then enter `sqlline`, as described in ["Getting to Know the Sandbox"]({{ site.baseurl }}/docs/getting-to-know-the-drill-sandbox):
 
-You can run queries from the `sqlline` prompt to complete the tutorial. To exit from
-SQLLine, type:
+You can run queries to complete the tutorial. To exit from
+the Drill shell, type:
 
     0: jdbc:drill:> !quit
 
-Examples in this tutorial use SQLLine. You can also execute queries using the Drill Web UI.
+Examples in this tutorial use the Drill shell. You can also execute queries using the Drill Web UI.
 
 ### List the available workspaces and databases:
 


[27/31] drill git commit: jinfeng's review, DRILL-3120

Posted by ts...@apache.org.
jinfeng's review, DRILL-3120


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/34c8b14b
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/34c8b14b
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/34c8b14b

Branch: refs/heads/gh-pages
Commit: 34c8b14b26463e3367a04fc6707989ee42be48be
Parents: 9701675
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 14:18:43 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 14:18:43 2015 -0700

----------------------------------------------------------------------
 .../050-starting-drill-on-windows.md             |  2 +-
 _docs/tutorials/020-drill-in-10-minutes.md       | 19 +++++++++++--------
 2 files changed, 12 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/34c8b14b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
index 942d727..3f8caef 100644
--- a/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/050-starting-drill-on-windows.md
@@ -4,7 +4,7 @@ parent: "Installing Drill in Embedded Mode"
 ---
 Start the Drill shell using the **sqlline command**. The `zk=local` means the local node is the ZooKeeper node. Complete the following steps to launch the Drill shell:
 
-1. Open Command Prompt.
+1. Open Command Prompt.  
 2. Open the apache-drill-1.0.0 folder. For example:  
    ``cd apache-drill-1.0.0``
 3. Go to the bin directory. For example:  

http://git-wip-us.apache.org/repos/asf/drill/blob/34c8b14b/_docs/tutorials/020-drill-in-10-minutes.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/020-drill-in-10-minutes.md b/_docs/tutorials/020-drill-in-10-minutes.md
index 5a51043..a0b6203 100755
--- a/_docs/tutorials/020-drill-in-10-minutes.md
+++ b/_docs/tutorials/020-drill-in-10-minutes.md
@@ -86,14 +86,17 @@ At this point, you can start Drill.
 ## Start Drill on Windows
 Start Drill by running the sqlline.bat file and typing a connection string, as shown in the following procedure. The `zk=local` in the connection string means the local node is the ZooKeeper node:
 
-1. Open the apache-drill-1.0.0 folder.  
-2. Open the bin folder, and double-click the `sqlline.bat` file:
-   ![drill bin dir]({{ site.baseurl }}/docs/img/drill-bin.png)
-   The Windows command prompt opens.  
-3. At the sqlline> prompt, type `!connect jdbc:drill:zk=local` and then press Enter:
-   ![sqlline]({{ site.baseurl }}/docs/img/sqlline1.png)
-4. Enter the username, `admin`, and password, also `admin` when prompted.  
-   The `0: jdbc:drill:zk=local>` prompt appears.
+Start the Drill shell using the **sqlline command**. The `zk=local` means the local node is the ZooKeeper node. Complete the following steps to launch the Drill shell:
+
+1. Open Command Prompt.  
+2. Open the apache-drill-1.0.0 folder. For example:  
+   ``cd apache-drill-1.0.0``
+3. Go to the bin directory. For example:  
+   ``cd bin``
+4. Type the following command on the command line:
+   ``sqlline.bat -u "jdbc:drill:zk=local"``
+   ![drill install dir]({{ site.baseurl }}/docs/img/sqlline1.png)
+
 At this point, you can [run queries]({{ site.baseurl }}/docs/drill-in-10-minutes#query-sample-data).
 
 ## Stopping Drill


[07/31] drill git commit: add perf tune structure and links, drillbit_queries.json, remove videos

Posted by ts...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/getting-started/020-why-drill.md
----------------------------------------------------------------------
diff --git a/_docs/getting-started/020-why-drill.md b/_docs/getting-started/020-why-drill.md
index d00d882..2e57b88 100644
--- a/_docs/getting-started/020-why-drill.md
+++ b/_docs/getting-started/020-why-drill.md
@@ -7,27 +7,25 @@ parent: "Getting Started"
 
 ### 1. Get started in minutes
 
-It only takes a couple of minutes to start working with Drill. Untar it on your Mac or Windows laptop and run a query on a local file. No need to set up any infrastructure. No need to define schemas. Just point at the data and drill!
+It takes a couple of minutes to start working with Drill. Untar the Drill software on your Mac or Windows laptop and run a query on a local file. No need to set up any infrastructure or to define schemas. Just point to the data—such as a file, directory, or HBase table—and drill.
 
     $ tar -xvf apache-drill-<version>.tar.gz
-    $ <install directory>/bin/sqlline -u jdbc:drill:zk=local
-    0: jdbc:drill:zk=local> USE cp; 
-    0: jdbc:drill:zk=local> SELECT * FROM employee.json limit 5;
-    +-------------+------------------+------------+------------+-------------+----------------------+------------+---------------+-----
-    | employee_id | full_name        | first_name | last_name  | position_id | position_title       |  store_id  | department_id | birt 
-    +-------------+------------------+------------+------------+-------------+----------------------+------------+---------------+------+
-    | 1           | Sheri Nowmer     | Sheri      | Nowmer     | 1           | President            | 0          | 1             | 19   
-    | 2           | Derrick Whelply  | Derrick    | Whelply    | 2           | VP Country Manager   | 0          | 1             |
-    | 4           | Michael Spence   | Michael    | Spence     | 2           | VP Country Manager   | 0          | 1             |
-    | 5           | Maya Gutierrez   | Maya       | Gutierrez  | 2           | VP Country Manager   | 0          | 1             |
-    | 6           | Roberta Damstra  | Roberta    | Damstra    | 3           | VP Information Systems | 0        | 2             |
-    +-------------+------------------+------------+------------+-------------+----------------------+------------+---------------+-----
+    $ <install directory>/bin/drill-embedded
+    0: jdbc:drill:zk=local> SELECT * FROM cp.`employee.json` LIMIT 5;
+    +--------------+----------------------------+---------------------+---------------+--------------+----------------------------+-----------+----------------+-------------+------------------------+----------+----------------+----------------------+-----------------+---------+-----------------------+
+    | employee_id  |         full_name          |     first_name      |   last_name   | position_id  |       position_title       | store_id  | department_id  | birth_date  |       hire_date        |  salary  | supervisor_id  |   education_level    | marital_status  | gender  |    management_role    |
+    +--------------+----------------------------+---------------------+---------------+--------------+----------------------------+-----------+----------------+-------------+------------------------+----------+----------------+----------------------+-----------------+---------+-----------------------+
+    | 1            | Sheri Nowmer               | Sheri               | Nowmer        | 1            | President                  | 0         | 1              | 1961-08-26  | 1994-12-01 00:00:00.0  | 80000.0  | 0              | Graduate Degree      | S               | F       | Senior Management     |
+    | 2            | Derrick Whelply            | Derrick             | Whelply       | 2            | VP Country Manager         | 0         | 1              | 1915-07-03  | 1994-12-01 00:00:00.0  | 40000.0  | 1              | Graduate Degree      | M               | M       | Senior Management     |
+    | 4            | Michael Spence             | Michael             | Spence        | 2            | VP Country Manager         | 0         | 1              | 1969-06-20  | 1998-01-01 00:00:00.0  | 40000.0  | 1              | Graduate Degree      | S               | M       | Senior Management     |
+    | 5            | Maya Gutierrez             | Maya                | Gutierrez     | 2            | VP Country Manager         | 0         | 1              | 1951-05-10  | 1998-01-01 00:00:00.0  | 35000.0  | 1              | Bachelors Degree     | M               | F       | Senior Management     |
+
 
 ## 2. Schema-free JSON model
-Drill is the world's first and only distributed SQL engine that doesn't require schemas. It shares the same schema-free JSON model as MongoDB and Elasticsearch. Instead of spending weeks or months defining schemas, transforming data (ETL) and maintaining those schemas, simply point Drill at your data (file, directory, HBase table, etc.) and run your queries. Drill automatically understands the structure of the data. Drill's self-service approach reduces the burden on IT and increases the productivity and agility of analysts and developers.
+Drill is the world's first and only distributed SQL engine that doesn't require schemas. It shares the same schema-free JSON model as MongoDB and Elasticsearch. No need to define and maintain schemas or transform data (ETL). Drill automatically understands the structure of the data. 
 
 ## 3. Query complex, semi-structured data in-situ
-Drill's schema-free JSON model allows you to query complex, semi-structured data in situ. No need to flatten or transform the data prior to or during query execution. Drill also provides intuitive extensions to SQL to work with nested data. Here's a simple query on a JSON file demonstrating how to access nested elements and arrays:
+Using Drill's schema-free JSON model, you can query complex, semi-structured data in situ. No need to flatten or transform the data prior to or during query execution. Drill also provides intuitive extensions to SQL to work with nested data. Here's a simple query on a JSON file demonstrating how to access nested elements and arrays:
 
     SELECT * FROM (SELECT t.trans_id,
                           t.trans_info.prod_id[0] AS prod_id,
@@ -56,7 +54,7 @@ Drill supports the standard SQL:2003 syntax. No need to learn a new "SQL-like" l
           ORDER BY o.o_orderpriority;
 
 ## 5. Leverage standard BI tools
-Drill works with standard BI tools. You can keep using the tools you love, such as Tableau, MicroStrategy, QlikView and Excel. No need to introduce yet another visualization or dashboard tool. Combine a self-service BI tool with the only self-service SQL engine to enable true self-service data exploration.
+Drill works with standard BI tools. You can use your existing tools, such as Tableau, MicroStrategy, QlikView and Excel. 
 
 ## 6. Interactive queries on Hive tables
 Apache Drill lets you leverage your investments in Hive. You can run interactive queries with Drill on your Hive tables and access all Hive input/output formats (including custom SerDes). You can join tables associated with different Hive metastores, and you can join a Hive table with an HBase table or a directory of log files. Here's a simple query in Drill on a Hive table:
@@ -68,7 +66,7 @@ Apache Drill lets you leverage your investments in Hive. You can run interactive
 
 
 ## 7. Access multiple data sources
-Drill is designed with extensibility in mind. It provides out-of-the-box connectivity to file systems (local or distributed file systems such as S3, HDFS and MapR-FS), HBase and Hive. You can implement a storage plugin to make Drill work with any other data source. Drill can combine data from multiple data sources on the fly in a single query, with no centralized metadata definitions. Here's a query that combines data from a Hive table, an HBase table (view) and a JSON file:
+Drill is extensible. You can connect Drill out-of-the-box to file systems (local or distributed, such as S3, HDFS and MapR-FS), HBase and Hive. You can implement a storage plugin to make Drill work with any other data source. Drill can combine data from multiple data sources on the fly in a single query, with no centralized metadata definitions. Here's a query that combines data from a Hive table, an HBase table (view) and a JSON file:
 
     SELECT custview.membership, sum(orders.order_total) AS sales
     FROM hive.orders, custview, dfs.`clicks/clicks.json` c 
@@ -81,7 +79,7 @@ Drill exposes a simple and high-performance Java API to build custom functions (
 
 
 ## 9. High performance
-Drill is designed from the ground up for high throughput and low latency. It doesn't use a general purpose execution engine like MapReduce, Tez or Spark. As a result, Drill is able to deliver its unparalleled flexibility (schema-free JSON model) without compromising performance. Drill's optimizer leverages rule- and cost-based techniques, as well as data locality and operator push-down (the ability to push down query fragments into the back-end data sources). Drill also provides a columnar and vectorized execution engine, resulting in higher memory and CPU efficiency.
+Drill is designed from the ground up for high throughput and low latency. It doesn't use a general purpose execution engine like MapReduce, Tez or Spark. As a result, Drill is flexible (schema-free JSON model) and performant. Drill's optimizer leverages rule- and cost-based techniques, as well as data locality and operator push-down, which is the capability to push down query fragments into the back-end data sources. Drill also provides a columnar and vectorized execution engine, resulting in higher memory and CPU efficiency.
 
 ## 10. Scales from a single laptop to a 1000-node cluster
-Drill is available as a simple download you can run on your laptop. When you're ready to analyze larger datasets, simply deploy Drill on your Hadoop cluster (up to 1000 commodity servers). Drill leverages the aggregate memory in the cluster to execute queries using an optimistic pipelined model, and automatically spills to disk when the working set doesn't fit in memory.
+Drill is available as a simple download you can run on your laptop. When you're ready to analyze larger datasets, deploy Drill on your Hadoop cluster (up to 1000 commodity servers). Drill leverages the aggregate memory in the cluster to execute queries using an optimistic pipelined model, and automatically spills to disk when the working set doesn't fit in memory.

http://git-wip-us.apache.org/repos/asf/drill/blob/cd0a0e96/_docs/performance-tuning/010-performance-tuning-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/010-performance-tuning-introduction.md b/_docs/performance-tuning/010-performance-tuning-introduction.md
new file mode 100644
index 0000000..1dffb56
--- /dev/null
+++ b/_docs/performance-tuning/010-performance-tuning-introduction.md
@@ -0,0 +1,17 @@
+---
+title: "Performance Tuning Introduction"
+parent: "Performance Tuning"
+---
+You can apply performance tuning measures to improve how efficiently Drill queries data. To significantly improve performance in Drill, you must have knowledge about the underlying data and data sources, as well as familiarity with how Drill executes queries.
+
+You can analyze query plans and profiles to identify the source of performance issues in Drill. Once you have isolated the source of an issue, you can apply the following tuning techniques to improve query performance:
+
+* Modify query planning options
+* Modify broadcast join options
+* Switch between 1 or 2 phase aggregation
+* Enable/disable hash-based memory-constrained operators
+* Enable query queuing
+* Control parallelization
+* Organize data for partition pruning
+* Change storage formats
+* Disable Logging (See Logging and Debugging)


[14/31] drill git commit: fix links

Posted by ts...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/sql-functions/020-data-type-conversion.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/020-data-type-conversion.md b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
index c60c096..82e8931 100644
--- a/_docs/sql-reference/sql-functions/020-data-type-conversion.md
+++ b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
@@ -47,6 +47,8 @@ The following examples show how to cast a string to a number, a number to a stri
 ### Casting a Character String to a Number
 You cannot cast a character string that includes a decimal point to an INT or BIGINT. For example, if you have "1200.50" in a JSON file, attempting to select and cast the string to an INT fails. As a workaround, cast to a FLOAT or DECIMAL type, and then to an INT. 
 
+{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to true.{% include endnote.html %}
+
 The following example shows how to cast a character to a DECIMAL having two decimal places.
 
     SELECT CAST('1' as DECIMAL(28, 2)) FROM sys.version;
@@ -56,8 +58,6 @@ The following example shows how to cast a character to a DECIMAL having two deci
     | 1.00       |
     +------------+
 
-{% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to true.{% include endnote.html %}
-
 ### Casting a Number to a Character String
 The first example shows Drill casting a number to a VARCHAR having a length of 3 bytes: The result is a 3-character string, 456. Drill supports the CHAR and CHARACTER VARYING alias.
 
@@ -82,12 +82,11 @@ The first example shows Drill casting a number to a VARCHAR having a length of 3
 Cast an integer to a decimal.
 
     SELECT CAST(-2147483648 AS DECIMAL(28,8)) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | -2.147483648E9 |
-    +------------+
-    1 row selected (0.08 seconds)
+    +-----------------+
+    |     EXPR$0      |
+    +-----------------+
+    | -2.147483648E9  |
+    +-----------------+
 
 {% include startnote.html %}In this release, Drill disables the DECIMAL data type. To enable, set the planner.enable_decimal_data_type option to true.{% include endnote.html %}
 
@@ -108,12 +107,12 @@ For example, a JSON file named intervals.json contains the following objects:
 
         ALTER SESSION SET `store.format` = 'parquet';
 
-        +------------+------------+
-        |     ok     |  summary   |
-        +------------+------------+
-        | true       | store.format updated. |
-        +------------+------------+
-        1 row selected (0.037 seconds)
+        +-------+------------------------+
+        |  ok   |        summary         |
+        +-------+------------------------+
+        | true  | store.format updated.  |
+        +-------+------------------------+
+        1 row selected (0.072 seconds)
 
 2. Use a CTAS statement to cast text from a JSON file to year and day intervals and to write the data to a Parquet table:
 
@@ -153,14 +152,14 @@ This example shows how to use the CONVERT_FROM function to convert complex HBase
 
     SELECT * FROM students;
         
-    +------------+------------+------------+
-    |  row_key   |  account   |  address   |
-    +------------+------------+------------+
-    | [B@e6d9eb7 | {"name":"QWxpY2U="} | {"state":"Q0E=","street":"MTIzIEJhbGxtZXIgQXY=","zipcode":"MTIzNDU="} |
-    | [B@2823a2b4 | {"name":"Qm9i"} | {"state":"Q0E=","street":"MSBJbmZpbml0ZSBMb29w","zipcode":"MTIzNDU="} |
-    | [B@3b8eec02 | {"name":"RnJhbms="} | {"state":"Q0E=","street":"NDM1IFdhbGtlciBDdA==","zipcode":"MTIzNDU="} |
+    +-------------+---------------------+---------------------------------------------------------------------------+
+    |   row_key   |  account            |                               address                                     |
+    +-------------+---------------------+---------------------------------------------------------------------------+
+    | [B@e6d9eb7  | {"name":"QWxpY2U="} | {"state":"Q0E=","street":"MTIzIEJhbGxtZXIgQXY=","zipcode":"MTIzNDU="}     |
+    | [B@2823a2b4 | {"name":"Qm9i"}     | {"state":"Q0E=","street":"MSBJbmZpbml0ZSBMb29w","zipcode":"MTIzNDU="}     |
+    | [B@3b8eec02 | {"name":"RnJhbms="} | {"state":"Q0E=","street":"NDM1IFdhbGtlciBDdA==","zipcode":"MTIzNDU="}     |
     | [B@242895da | {"name":"TWFyeQ=="} | {"state":"Q0E=","street":"NTYgU291dGhlcm4gUGt3eQ==","zipcode":"MTIzNDU="} |
-    +------------+------------+------------+
+    +-------------+---------------------+---------------------------------------------------------------------------+
     4 rows selected (1.335 seconds)
 
You use the CONVERT_FROM function to decode the binary data to render it readable, selecting a data type to use from the [list of supported types]({{ site.baseurl }}/docs/data-type-conversion/#convert_to-and-convert_from-data-types). JSON supports strings. To convert binary to strings, use the UTF8 type:
@@ -171,25 +170,25 @@ You use the CONVERT_FROM function to decode the binary data to render it readabl
            CONVERT_FROM(students.address.street, 'UTF8') AS street, 
            CONVERT_FROM(students.address.zipcode, 'UTF8') AS zipcode FROM students;
 
-    +------------+------------+------------+------------+------------+
-    | studentid  |    name    |   state    |   street   |  zipcode   |
-    +------------+------------+------------+------------+------------+
-    | student1   | Alice      | CA         | 123 Ballmer Av | 12345      |
-    | student2   | Bob        | CA         | 1 Infinite Loop | 12345      |
-    | student3   | Frank      | CA         | 435 Walker Ct | 12345      |
+    +------------+------------+------------+------------------+------------+
+    | studentid  |    name    |   state    |      street      |  zipcode   |
+    +------------+------------+------------+------------------+------------+
+    | student1   | Alice      | CA         | 123 Ballmer Av   | 12345      |
+    | student2   | Bob        | CA         | 1 Infinite Loop  | 12345      |
+    | student3   | Frank      | CA         | 435 Walker Ct    | 12345      |
     | student4   | Mary       | CA         | 56 Southern Pkwy | 12345      |
-    +------------+------------+------------+------------+------------+
+    +------------+------------+------------+------------------+------------+
     4 rows selected (0.504 seconds)
 
 This example converts from VARCHAR to a JSON map:
 
     SELECT CONVERT_FROM('{x:100, y:215.6}' ,'JSON') AS MYCOL FROM sys.version;
-    +------------+
-    |   MYCOL    |
-    +------------+
-    | {"x":100,"y":215.6} |
-    +------------+
-    1 row selected (0.073 seconds)
+    +----------------------+
+    |        MYCOL         |
+    +----------------------+
+    | {"x":100,"y":215.6}  |
+    +----------------------+
+    1 row selected (0.163 seconds)
 
 This example uses a list of BIGINT as input and returns a repeated list of vectors:
 
@@ -204,12 +203,12 @@ This example uses a list of BIGINT as input and returns a repeated list of vecto
 This example uses a map as input to return a repeated list vector (JSON).
 
     SELECT CONVERT_FROM('[{a : 100, b: 200}, {a:300, b: 400}]' ,'JSON') AS MYCOL1  FROM sys.version;
-    +------------+
-    |   MYCOL1   |
-    +------------+
-    | [{"a":100,"b":200},{"a":300,"b":400}] |
-    +------------+
-    1 row selected (0.074 seconds)
+    +------------------------------------------+
+    |                  MYCOL1                  |
+    +------------------------------------------+
+    | [{"a":100,"b":200},{"a":300,"b":400}]    |
+    +------------------------------------------+
+    1 row selected (0.141 seconds)
 
 ### Set Up a Storage Plugin for Working with HBase Files
 
@@ -311,12 +310,12 @@ First, you set the storage format to JSON. Next, you use the CREATE TABLE AS SEL
 6. Set up Drill to store data in Parquet format.
 
         ALTER SESSION SET `store.format`='parquet';
-        +------------+------------+
-        |     ok     |  summary   |
-        +------------+------------+
-        | true       | store.format updated. |
-        +------------+------------+
-        1 row selected (0.056 seconds)
+        +-------+------------------------+
+        |  ok   |        summary         |
+        +-------+------------------------+
+        | true  | store.format updated.  |
+        +-------+------------------------+
+        1 row selected (0.07 seconds)
 
 7. Use CONVERT_TO to convert the JSON data to a binary format in the Parquet file.
 
@@ -337,14 +336,14 @@ First, you set the storage format to JSON. Next, you use the CREATE TABLE AS SEL
 8. Take a look at the binary Parquet output:
 
         SELECT * FROM tmp.`json2parquet`;
-        +------------+------------+------------+------------+------------+
-        |     id     |    name    |   state    |   street   |    zip     |
-        +------------+------------+------------+------------+------------+
+        +-------------+-------------+-------------+-------------+-------------+
+        |      id     |    name     |    state    |   street    |     zip     |
+        +-------------+-------------+-------------+-------------+-------------+
         | [B@224388b2 | [B@7fc36fb0 | [B@77d9cd57 | [B@7c384839 | [B@530dd5e5 |
-        | [B@3155d7fc | [B@7ad6fab1 | [B@37e4b978 | [B@94c91f3 | [B@201ed4a |
+        | [B@3155d7fc | [B@7ad6fab1 | [B@37e4b978 | [B@94c91f3  | [B@201ed4a  |
         | [B@4fb2c078 | [B@607a2f28 | [B@75ae1c93 | [B@79d63340 | [B@5dbeed3d |
-        | [B@2fcfec74 | [B@7baccc31 | [B@d91e466 | [B@6529eb7f | [B@232412bc |
-        +------------+------------+------------+------------+------------+
+        | [B@2fcfec74 | [B@7baccc31 | [B@d91e466  | [B@6529eb7f | [B@232412bc |
+        +-------------+-------------+-------------+-------------+-------------+
         4 rows selected (0.12 seconds)
 
 9. Use CONVERT_FROM to convert the Parquet data to a readable format:
@@ -356,14 +355,14 @@ First, you set the storage format to JSON. Next, you use the CREATE TABLE AS SEL
                CONVERT_FROM(zip, 'UTF8') AS zip 
         FROM tmp.`json2parquet2`;
 
-        +------------+------------+------------+------------+------------+
-        |     id     |    name    |   state    |  address   |    zip     |
-        +------------+------------+------------+------------+------------+
-        | student1   | Alice      | CA         | 123 Ballmer Av | 12345      |
-        | student2   | Bob        | CA         | 1 Infinite Loop | 12345      |
-        | student3   | Frank      | CA         | 435 Walker Ct | 12345      |
+        +------------+------------+------------+------------------+------------+
+        |     id     |    name    |   state    |  address         |    zip     |
+        +------------+------------+------------+------------------+------------+
+        | student1   | Alice      | CA         | 123 Ballmer Av   | 12345      |
+        | student2   | Bob        | CA         | 1 Infinite Loop  | 12345      |
+        | student3   | Frank      | CA         | 435 Walker Ct    | 12345      |
         | student4   | Mary       | CA         | 56 Southern Pkwy | 12345      |
-        +------------+------------+------------+------------+------------+
+        +------------+------------+------------+------------------+------------+
         4 rows selected (0.182 seconds)
 
 ## Other Data Type Conversions
@@ -481,11 +480,12 @@ Convert an integer to a character string.
 Convert a date to a character string.
 
     SELECT TO_CHAR((CAST('2008-2-23' AS DATE)), 'yyyy-MMM-dd') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2008-Feb-23 |
-    +------------+
+    +--------------+
+    |    EXPR$0    |
+    +--------------+
+    | 2008-Feb-23  |
+    +--------------+
+    1 row selected (0.166 seconds)
 
 Convert a time to a string.
 
@@ -501,12 +501,12 @@ Convert a time to a string.
 Convert a timestamp to a string.
 
     SELECT TO_CHAR(CAST('2015-2-23 12:00:00' AS TIMESTAMP), 'yyyy MMM dd HH:mm:ss') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2015 Feb 23 12:00:00 |
-    +------------+
-    1 row selected (0.075 seconds)
+    +-----------------------+
+    |        EXPR$0         |
+    +-----------------------+
+    | 2015 Feb 23 12:00:00  |
+    +-----------------------+
+    1 row selected (0.142 seconds)
 
 ## TO_DATE
 Converts a character string or a UNIX epoch timestamp to a date.
@@ -690,21 +690,22 @@ Specify a format using patterns defined in [Java DateTimeFormat class](http://jo
 Convert a date to a timestamp. 
 
     SELECT TO_TIMESTAMP('2008-2-23 12:00:00', 'yyyy-MM-dd HH:mm:ss') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2008-02-23 12:00:00.0 |
-    +------------+
+    +------------------------+
+    |         EXPR$0         |
+    +------------------------+
+    | 2008-02-23 12:00:00.0  |
+    +------------------------+
+    1 row selected (0.126 seconds)
 
 Convert Unix Epoch time to a timestamp.
 
     SELECT TO_TIMESTAMP(1427936330) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2015-04-01 17:58:50.0 |
-    +------------+
-    1 row selected (0.094 seconds)
+    +------------------------+
+    |         EXPR$0         |
+    +------------------------+
+    | 2015-04-01 17:58:50.0  |
+    +------------------------+
+    1 row selected (0.114 seconds)
 
 Convert a UTC date to a timestamp offset from the UTC time zone code.
 
@@ -712,12 +713,12 @@ Convert a UTC date to a timestamp offset from the UTC time zone code.
            TO_CHAR(TO_TIMESTAMP('2015-03-30 20:49:59.0 UTC', 'YYYY-MM-dd HH:mm:ss.s z'), 'z') AS New_TZ 
     FROM sys.version;
 
-    +------------+------------+
-    |  Original  |   New_TZ   |
-    +------------+------------+
-    | 2015-03-30 20:49:00.0 | UTC        |
-    +------------+------------+
-    1 row selected (0.129 seconds)
+    +------------------------+---------+
+    |        Original        | New_TZ  |
+    +------------------------+---------+
+    | 2015-03-30 20:49:00.0  | UTC     |
+    +------------------------+---------+
+    1 row selected (0.148 seconds)
 
 ## Time Zone Limitation
 Currently Drill does not support conversion of a date, time, or timestamp from one time zone to another. Queries of data associated with a time zone can return inconsistent results or an error. For more information, see the ["Understanding Drill's Timestamp and Timezone"](http://www.openkb.info/2015/05/understanding-drills-timestamp-and.html#.VUzhotpVhHw) blog. The Drill time zone is based on the operating system time zone unless you override it. To work around the limitation, configure Drill to use [UTC](http://www.timeanddate.com/time/aboututc.html)-based time, convert your data to UTC timestamps, and perform date/time operation in UTC.  
@@ -726,12 +727,12 @@ Currently Drill does not support conversion of a date, time, or timestamp from o
 
         SELECT TIMEOFDAY() FROM sys.version;
 
-        +------------+
-        |   EXPR$0   |
-        +------------+
-        | 2015-04-02 15:01:31.114 America/Los_Angeles |
-        +------------+
-        1 row selected (1.199 seconds)
+        +----------------------------------------------+
+        |                    EXPR$0                    |
+        +----------------------------------------------+
+        | 2015-05-17 22:37:29.516 America/Los_Angeles  |
+        +----------------------------------------------+
+        1 row selected (0.108 seconds)
 
 2. Configure the default time zone format in <drill installation directory>/conf/drill-env.sh by adding `-Duser.timezone=UTC` to DRILL_JAVA_OPTS. For example:
 
@@ -743,12 +744,12 @@ Currently Drill does not support conversion of a date, time, or timestamp from o
 
         SELECT TIMEOFDAY() FROM sys.version;
 
-        +------------+
-        |   EXPR$0   |
-        +------------+
-        | 2015-04-02 17:05:02.424 UTC |
-        +------------+
-        1 row selected (1.191 seconds)
+        +------------------------------+
+        |            EXPR$0            |
+        +------------------------------+
+        | 2015-05-17 22:37:57.082 UTC  |
+        +------------------------------+
+        1 row selected (0.087 seconds)
 
 You can use the ‘z’ option to identify the time zone in TO_TIMESTAMP to make sure the timestamp has the timezone in it. Also, use the ‘z’ option to identify the time zone in a timestamp using the TO_CHAR function. For example:
 
@@ -756,12 +757,12 @@ You can use the ‘z’ option to identify the time zone in TO_TIMESTAMP to make
            TO_CHAR(TO_TIMESTAMP('2015-03-30 20:49:59.0 UTC', 'YYYY-MM-dd HH:mm:ss.s z'), 'z') AS TimeZone 
            FROM sys.version;
 
-    +------------+------------+
-    |  Original  |  TimeZone  |
-    +------------+------------+
-    | 2015-03-30 20:49:00.0 | UTC        |
-    +------------+------------+
-    1 row selected (0.299 seconds)
+    +------------------------+-----------+
+    |        Original        | TimeZone  |
+    +------------------------+-----------+
+    | 2015-03-30 20:49:00.0  | UTC       |
+    +------------------------+-----------+
+    1 row selected (0.097 seconds)
 
 <!-- DRILL-448 Support timestamp with time zone -->
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md b/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
index 7f6a534..66df6ff 100644
--- a/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
+++ b/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
@@ -5,22 +5,22 @@ parent: "SQL Functions"
 
 In addition to the TO_DATE, TO_TIME, and TO_TIMESTAMP functions, Drill supports a number of other date/time functions and arithmetic operators for use with dates, times, and intervals. Drill supports time functions based on the Gregorian calendar and in the range 1971 to 2037.
 
-This section defines the following date/time functions:
+This section covers the Drill [time zone limitation]({{site.baseurl}}/docs/data-type-conversion/#time-zone-limitation) and defines the following date/time functions:
 
 **Function**| **Return Type**  
 ---|---  
 [AGE(TIMESTAMP)]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#age)| INTERVALDAY or INTERVALYEAR
 [EXTRACT(field from time_expression)]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#extract)| DOUBLE
-[CURRENT_DATE]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#current_*x*-local*x*-now-and-timeofday)| DATE  
-[CURRENT_TIME]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#current_*x*-local*x*-now-and-timeofday)| TIME   
-[CURRENT_TIMESTAMP]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#current_*x*-local*x*-now-and-timeofday)| TIMESTAMP 
+[CURRENT_DATE]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic/#other-date-and-time-functions)| DATE  
+[CURRENT_TIME]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic/#other-date-and-time-functions)| TIME   
+[CURRENT_TIMESTAMP]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic/#other-date-and-time-functions)| TIMESTAMP 
 [DATE_ADD]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#date_add)| DATE, TIMESTAMP  
 [DATE_PART]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#date_part)| DOUBLE  
 [DATE_SUB]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#date_sub)| DATE, TIMESTAMOP     
-[LOCALTIME]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#current_*x*-local*x*-now-and-timeofday)| TIME  
-[LOCALTIMESTAMP]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#current_*x*-local*x*-now-and-timeofday)| TIMESTAMP  
-[NOW]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#current_*x*-local*x*-now-and-timeofday)| TIMESTAMP  
-[TIMEOFDAY]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic#current_*x*-local*x*-now-and-timeofday)| VARCHAR  
+[LOCALTIME]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic/#other-date-and-time-functions)| TIME  
+[LOCALTIMESTAMP]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic/#other-date-and-time-functions)| TIMESTAMP  
+[NOW]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic/#other-date-and-time-functions)| TIMESTAMP  
+[TIMEOFDAY]({{ site.baseurl }}/docs/date-time-functions-and-arithmetic/#other-date-and-time-functions)| VARCHAR  
 
 ## AGE
 Returns the interval between two timestamps or subtracts a timestamp from midnight of the current date.
@@ -49,14 +49,14 @@ Find the interval between midnight April 3, 2015 and June 13, 1957.
 Find the interval between 11:10:10 PM on January 1, 2001 and 10:10:10 PM on January 1, 2001.
 
     SELECT AGE(CAST('2010-01-01 10:10:10' AS TIMESTAMP), CAST('2001-01-01 11:10:10' AS TIMESTAMP)) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | P109M16DT82800S |
-    +------------+
-    1 row selected (0.161 seconds)
+    +------------------+
+    |      EXPR$0      |
+    +------------------+
+    | P109M16DT82800S  |
+    +------------------+
+    1 row selected (0.122 seconds)
 
-For information about how to read the interval data, see the [Interval section]({{ site.baseurl }}/docs/date-time-and-timestamp#interval).
+For information about how to read the interval data, see the [Interval section]({{ site.baseurl }}/docs/date-time-and-timestamp/#intervalyear-and-intervalday).
 
 ## DATE_ADD
 Returns the sum of a date/time and a number of days/hours, or of a date/time and date/time interval.
@@ -92,22 +92,22 @@ Add two days to today's date May 15, 2015.
 Add two months to April 15, 2015.
 
     SELECT DATE_ADD(date '2015-04-15', interval '2' month) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2015-06-15 00:00:00.0 |
-    +------------+
-    1 row selected (0.073 seconds)
+    +------------------------+
+    |         EXPR$0         |
+    +------------------------+
+    | 2015-06-15 00:00:00.0  |
+    +------------------------+
+    1 row selected (0.107 seconds)
 
 Add 10 hours to the timestamp 2015-04-15 22:55:55.
 
     SELECT DATE_ADD(timestamp '2015-04-15 22:55:55', interval '10' hour) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2015-04-16 08:55:55.0 |
-    +------------+
-    1 row selected (0.068 seconds)
+    +------------------------+
+    |         EXPR$0         |
+    +------------------------+
+    | 2015-04-16 08:55:55.0  |
+    +------------------------+
+    1 row selected (0.199 seconds)
 
 Add 10 hours to the time 22 hours, 55 minutes, 55 seconds.
 
@@ -122,22 +122,22 @@ Add 10 hours to the time 22 hours, 55 minutes, 55 seconds.
 Add 1 year and 1 month to the timestamp 2015-04-15 22:55:55.
 
     SELECT DATE_ADD(timestamp '2015-04-15 22:55:55', interval '1-2' year to month) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2016-06-15 22:55:55.0 |
-    +------------+
-    1 row selected (0.065 seconds)
+    +------------------------+
+    |         EXPR$0         |
+    +------------------------+
+    | 2016-06-15 22:55:55.0  |
+    +------------------------+
+    1 row selected (0.106 seconds)
 
 Add 1 day 2 and 1/2 hours and 45.100 seconds to the time 22:55:55.
 
     SELECT DATE_ADD(time '22:55:55', interval '1 2:30:45.100' day to second) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 01:26:40.100 |
-    +------------+
-    1 row selected (0.07 seconds)
+    +---------------+
+    |    EXPR$0     |
+    +---------------+
+    | 01:26:40.100  |
+    +---------------+
+    1 row selected (0.106 seconds)
 
 ## DATE_PART
 Returns a field of a date, time, timestamp, or interval.
@@ -234,12 +234,12 @@ Subtact two months from April 15, 2015.
 Subtract 10 hours from the timestamp 2015-04-15 22:55:55.
 
     SELECT DATE_SUB(timestamp '2015-04-15 22:55:55', interval '10' hour) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2015-04-15 12:55:55.0 |
-    +------------+
-    1 row selected (0.068 seconds)
+    +------------------------+
+    |         EXPR$0         |
+    +------------------------+
+    | 2015-04-15 12:55:55.0  |
+    +------------------------+
+    1 row selected (0.108 seconds)
 
 Subtract 10 hours from the time 22 hours, 55 minutes, 55 seconds.
 
@@ -254,24 +254,24 @@ Subtract 10 hours from the time 22 hours, 55 minutes, 55 seconds.
 Subtract 1 year and 1 month from the timestamp 2015-04-15 22:55:55.
 
     SELECT DATE_SUB(timestamp '2015-04-15 22:55:55', interval '1-2' year to month) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2014-02-15 22:55:55.0 |
-    +------------+
-    1 row selected (0.073 seconds)
+    +------------------------+
+    |         EXPR$0         |
+    +------------------------+
+    | 2014-02-15 22:55:55.0  |
+    +------------------------+
+    1 row selected (0.108 seconds)
 
 Subtract 1 day, 2 and 1/2 hours, and 45.100 seconds from the time 22:55:55.
 
     SELECT DATE_ADD(time '22:55:55', interval '1 2:30:45.100' day to second) FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 01:26:40.100 |
-    +------------+
-    1 row selected (0.073 seconds)
+    +---------------+
+    |    EXPR$0     |
+    +---------------+
+    | 01:26:40.100  |
+    +---------------+
+    1 row selected (0.095 seconds)
 
-## CURRENT_*x*, LOCAL*x*, NOW, and TIMEOFDAY
+## Other Date and Time Functions
 
 The following examples show how to use these functions:
 
@@ -300,58 +300,58 @@ The following examples show how to use these functions:
         1 row selected (0.073 seconds)
 
         SELECT CURRENT_TIMESTAMP FROM sys.version;
-        +-------------------+
-        | current_timestamp |
-        +-------------------+
-        | 2015-04-02 14:32:34.047 |
-        +-------------------+
-        1 row selected (0.061 seconds)
+        +--------------------------+
+        |    CURRENT_TIMESTAMP     |
+        +--------------------------+
+        | 2015-05-17 22:45:55.848  |
+        +--------------------------+
+        1 row selected (0.109 seconds)
 
         SELECT LOCALTIME FROM sys.version;
 
-        +------------+
-        | localtime  |
-        +------------+
-        | 14:33:04.95 |
-        +------------+
-        1 row selected (0.051 seconds)
+        +---------------+
+        |   LOCALTIME   |
+        +---------------+
+        | 22:46:19.656  |
+        +---------------+
+        1 row selected (0.105 seconds)
 
         SELECT LOCALTIMESTAMP FROM sys.version;
 
-        +----------------+
-        | LOCALTIMESTAMP |
-        +----------------+
-        | 2015-04-02 23:13:13.204 |
-        +----------------+
-        1 row selected (0.105 seconds)
+        +--------------------------+
+        |      LOCALTIMESTAMP      |
+        +--------------------------+
+        | 2015-05-17 22:46:47.944  |
+        +--------------------------+
+        1 row selected (0.08 seconds)
 
         SELECT NOW() FROM sys.version;
-        +------------+
-        |   EXPR$0   |
-        +------------+
-        | 2015-04-02 23:14:30.076 |
-        +------------+
-        1 row selected (0.05 seconds)
+        +--------------------------+
+        |          EXPR$0          |
+        +--------------------------+
+        | 2015-05-17 22:47:11.008  |
+        +--------------------------+
+        1 row selected (0.085 seconds)
 
-If you set up Drill for [UTC time]({{ site.baseurl }}/docs/casting-converting-data-types/time-zone-limitation), TIMEOFDAY returns the result for the UTC time zone.
+If you set up Drill for [UTC time]({{ site.baseurl }}/docs/data-type-conversion/#time-zone-limitation), TIMEOFDAY returns the result for the UTC time zone.
 
     SELECT TIMEOFDAY() FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
+    +-----------------------------+
+    |           EXPR$0            |
+    +-----------------------------+
     | 2015-04-02 22:05:02.424 UTC |
-    +------------+
+    +-----------------------------+
     1 row selected (1.191 seconds)
 
 If you did not set up Drill for UTC time, TIMEOFDAY returns the local date and time with time zone information.
 
     SELECT TIMEOFDAY() FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 2015-04-02 15:01:31.114 America/Los_Angeles |
-    +------------+
-    1 row selected (1.199 seconds)
+    +----------------------------------------------+
+    |                    EXPR$0                    |
+    +----------------------------------------------+
+    | 2015-05-17 22:47:38.012 America/Los_Angeles  |
+    +----------------------------------------------+
+    1 row selected (0.08 seconds)
 
 ## EXTRACT
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/sql-functions/040-string-manipulation.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/040-string-manipulation.md b/_docs/sql-reference/sql-functions/040-string-manipulation.md
index ffb3173..4c5a09a 100644
--- a/_docs/sql-reference/sql-functions/040-string-manipulation.md
+++ b/_docs/sql-reference/sql-functions/040-string-manipulation.md
@@ -92,12 +92,12 @@ Concatenates arguments.
 
     SELECT CONCAT('Drill', ' ', 1.0, ' ', 'release') FROM sys.version;
 
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | Drill 1.0 release |
-    +------------+
-    1 row selected (0.221 seconds)
+    +--------------------+
+    |       EXPR$0       |
+    +--------------------+
+    | Drill 1.0 release  |
+    +--------------------+
+    1 row selected (0.134 seconds)
 
 Alternatively, you can use the [string concatenation operation]({{ site.baseurl }}/docs/operators#string-concatenate-operator) to concatenate strings.
 
@@ -112,12 +112,12 @@ Returns the string using initial caps.
 
     SELECT INITCAP('apache drill release 1.0') FROM sys.version;
 
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | Apache Drill Release 1.0 |
-    +------------+
-    1 row selected (0.124 seconds)
+    +---------------------------+
+    |          EXPR$0           |
+    +---------------------------+
+    | Apache Drill Release 1.0  |
+    +---------------------------+
+    1 row selected (0.106 seconds)
 
 ## LENGTH
 Returns the number of characters in the string.
@@ -159,12 +159,12 @@ Converts characters in the string to lowercase.
 
     SELECT LOWER('Apache Drill') FROM sys.version;
 
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | apache drill |
-    +------------+
-    1 row selected (0.113 seconds)
+    +---------------+
+    |    EXPR$0     |
+    +---------------+
+    | apache drill  |
+    +---------------+
+    1 row selected (0.103 seconds)
 
 ## LPAD
 Pads the string to the length specified by prepending the fill or a space. Truncates the string if longer than the specified length.
@@ -178,12 +178,12 @@ Pads the string to the length specified by prepending the fill or a space. Trunc
 
     SELECT LPAD('Release 1.0', 27, 'of Apache Drill 1.0') FROM sys.version;
 
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | of Apache Drill Release 1.0 |
-    +------------+
-    1 row selected (0.112 seconds)
+    +------------------------------+
+    |            EXPR$0            |
+    +------------------------------+
+    | of Apache Drill Release 1.0  |
+    +------------------------------+
+    1 row selected (0.132 seconds)
 
 ## LTRIM
 Removes any characters from the beginning of string1 that match the characters in string2. 
@@ -205,12 +205,12 @@ Removes any characters from the beginning of string1 that match the characters i
 
     SELECT LTRIM('A powerful tool Apache Drill', 'Apache ') FROM sys.version;
 
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | owerful tool Apache Drill |
-    +------------+
-    1 row selected (0.07 seconds)
+    +----------------------------+
+    |           EXPR$0           |
+    +----------------------------+
+    | owerful tool Apache Drill  |
+    +----------------------------+
+    1 row selected (0.1 seconds)
 
 ## POSITION
 Returns the location of a substring.
@@ -249,22 +249,23 @@ Substitutes new text for substrings that match [POSIX regular expression pattern
 Replace a's with b's in this string.
 
     SELECT REGEXP_REPLACE('abc, acd, ade, aef', 'a', 'b') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | bbc, bcd, bde, bef |
-    +------------+
+    +---------------------+
+    |       EXPR$0        |
+    +---------------------+
+    | bbc, bcd, bde, bef  |
+    +---------------------+
+    1 row selected (0.105 seconds)
 
 
 Use the regular expression *a* followed by a period (.) in the same query to replace all a's and the subsequent character.
 
     SELECT REGEXP_REPLACE('abc, acd, ade, aef', 'a.','b') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | bc, bd, be, bf |
-    +------------+
-    1 row selected (0.099 seconds)
+    +-----------------+
+    |     EXPR$0      |
+    +-----------------+
+    | bc, bd, be, bf  |
+    +-----------------+
+    1 row selected (0.113 seconds)
 
 
 ## RPAD
@@ -277,12 +278,12 @@ Pads the string to the length specified. Appends the text you specify after the
 ### RPAD Example
 
     SELECT RPAD('Apache Drill ', 22, 'Release 1.0') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | Apache Drill Release 1 |
-    +------------+
-    1 row selected (0.15 seconds)
+    +-------------------------+
+    |         EXPR$0          |
+    +-------------------------+
+    | Apache Drill Release 1  |
+    +-------------------------+
+    1 row selected (0.107 seconds)
 
 ## RTRIM
 Removes any characters from the end of string1 that match the characters in string2.  
@@ -303,12 +304,12 @@ Removes any characters from the end of string1 that match the characters in stri
     1 row selected (0.135 seconds)
 
     SELECT RTRIM('1.0 Apache Tomcat 1.0', 'Drill 1.0') from sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | 1.0 Apache Tomcat |
-    +------------+
-    1 row selected (0.088 seconds)
+    +--------------------+
+    |       EXPR$0       |
+    +--------------------+
+    | 1.0 Apache Tomcat  |
+    +--------------------+
+    1 row selected (0.102 seconds)
 
 ## STRPOS
 Returns the location of the substring in a string.
@@ -377,20 +378,20 @@ Removes any characters from the beginning, end, or both sides of string2 that ma
     1 row selected (0.172 seconds)
 
     SELECT TRIM(both 'l' from 'long live Drill') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | ong live Dri |
-    +------------+
-    1 row selected (0.087 seconds)
+    +---------------+
+    |    EXPR$0     |
+    +---------------+
+    | ong live Dri  |
+    +---------------+
+    1 row selected (0.104 seconds)
 
     SELECT TRIM(leading 'l' from 'long live Drill') FROM sys.version;
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | ong live Drill |
-    +------------+
-    1 row selected (0.077 seconds)
+    +-----------------+
+    |     EXPR$0      |
+    +-----------------+
+    | ong live Drill  |
+    +-----------------+
+    1 row selected (0.101 seconds)
 
 ## UPPER
 Converts characters in the string to uppercase.
@@ -403,9 +404,9 @@ Converts characters in the string to uppercase.
 
     SELECT UPPER('Apache Drill') FROM sys.version;
 
-    +------------+
-    |   EXPR$0   |
-    +------------+
-    | APACHE DRILL |
-    +------------+
-    1 row selected (0.104 seconds)
\ No newline at end of file
+    +---------------+
+    |    EXPR$0     |
+    +---------------+
+    | APACHE DRILL  |
+    +---------------+
+    1 row selected (0.081 seconds)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/sql-reference/sql-functions/050-aggregate-and-aggregate-statistical.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/050-aggregate-and-aggregate-statistical.md b/_docs/sql-reference/sql-functions/050-aggregate-and-aggregate-statistical.md
index 21e6e40..ff1305d 100644
--- a/_docs/sql-reference/sql-functions/050-aggregate-and-aggregate-statistical.md
+++ b/_docs/sql-reference/sql-functions/050-aggregate-and-aggregate-statistical.md
@@ -44,11 +44,11 @@ MIN, MAX, COUNT, AVG, and SUM accept ALL and DISTINCT keywords. The default is A
     13 rows selected (0.056 seconds)
 
     SELECT AVG(ALL a2) FROM t2;
-    +------------+
-    |   EXPR$0   |
-    +------------+
+    +--------------------+
+    |        EXPR$0      |
+    +--------------------+
     | 4.3076923076923075 |
-    +------------+
+    +--------------------+
     1 row selected (0.084 seconds)
 
     SELECT AVG(DISTINCT a2) FROM t2;

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/tutorials/010-tutorials-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/010-tutorials-introduction.md b/_docs/tutorials/010-tutorials-introduction.md
index 178e721..46c782c 100644
--- a/_docs/tutorials/010-tutorials-introduction.md
+++ b/_docs/tutorials/010-tutorials-introduction.md
@@ -12,13 +12,13 @@ If you've never used Drill, use these tutorials to download, install, and start
   Explore data using a Hadoop environment pre-configured with Drill.  
 * [Analyzing Highly Dynamic Datasets]({{site.baseurl}}/docs/analyzing-highly-dynamic-datasets)  
   Delve into changing data without creating a schema or going through an ETL phase.
-* [Analyzing Social Media]({site.baseurl}}/docs/analyzing-social-media)  
+* [Analyzing Social Media]({{site.baseurl}}/docs/analyzing-social-media)  
   Analyze Twitter data in native JSON format using Apache Drill.  
 * [Tableau Examples]({{site.baseurl}}/docs/tableau-examples)  
   Access Hive tables in Tableau.  
-* [Using MicroStrategy Analytics with Apache Drill]({{site.baseurl}}/docs/using-microstrategy-analytics-with--apache-drill/)  
+* [Using MicroStrategy Analytics with Apache Drill]({{site.baseurl}}/docs/using-microstrategy-analytics-with-apache-drill/)  
   Use the Drill ODBC driver from MapR to analyze data and generate a report using Drill from the MicroStrategy UI.  
-* [Using Tibco Spotfire Server with Drill]({{site.baseurl}}/drill/docs/using-tibco-spotfire-with-drill/)  
+* [Using Tibco Spotfire Server with Drill]({{site.baseurl}}/docs/using-tibco-spotfire-with-drill/)  
   Use the Apache Drill to query complex data structures from Tibco Spotfire Desktop.
 * [Configuring Tibco Spotfire Server with Drill]({{site.baseurl}}/docs/configuring-tibco-spotfire-server-with-drill)  
   Integrate Tibco Spotfire Server with Apache Drill and explore multiple data formats on Hadoop.  

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/tutorials/020-drill-in-10-minutes.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/020-drill-in-10-minutes.md b/_docs/tutorials/020-drill-in-10-minutes.md
index 95db7a9..5a51043 100755
--- a/_docs/tutorials/020-drill-in-10-minutes.md
+++ b/_docs/tutorials/020-drill-in-10-minutes.md
@@ -11,7 +11,7 @@ without having to perform any setup tasks.
 
 ## Installation Overview
 
-You can install Drill in embedded mode on a machine running Linux, Mac OS X, or Windows. For information about running Drill in distributed mode, see  [Deploying Drill in a Cluster]({{ site.baseurl }}/docs/deploying-drill-in-a-cluster).
+You can install Drill in embedded mode on a machine running Linux, Mac OS X, or Windows. For information about installing Drill in distributed mode, see [Installing Drill in Distributed Mode]({{ site.baseurl }}/docs/installing-drill-in-distributed-mode).
 
 This installation procedure includes how to download the Apache Drill archive and extract the contents to a directory on your machine. The Apache Drill archive contains sample JSON and Parquet files that you can query immediately.
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md b/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
index 3a53efd..1eb2728 100644
--- a/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
+++ b/_docs/tutorials/030-analyzing-the-yelp-academic-dataset.md
@@ -2,17 +2,14 @@
 title: "Analyzing the Yelp Academic Dataset"
 parent: "Tutorials"
 ---
-[Apache Drill](https://www.mapr.com/products/apache-drill) is one of the
-fastest growing open source projects, with the community making rapid progress
-with monthly releases. The key difference is Drill’s agility and flexibility.
+Apache Drill is one of the fastest growing open source projects, with the community making rapid progress with monthly releases. The key difference is Drill’s agility and flexibility.
 Along with meeting the table stakes for SQL-on-Hadoop, which is to achieve low
 latency performance at scale, Drill allows users to analyze the data without
 any ETL or up-front schema definitions. The data could be in any file format
 such as text, JSON, or Parquet. Data could have simple types such as string,
 integer, dates, or more complex multi-structured data, such as nested maps and
 arrays. Data can exist in any file system, local or distributed, such as HDFS,
-[MapR FS](https://www.mapr.com/blog/comparing-mapr-fs-and-hdfs-nfs-and-
-snapshots), or S3. Drill, has a “no schema” approach, which enables you to get
+MapR FS, or S3. Drill, has a “no schema” approach, which enables you to get
 value from your data in just a few minutes.
 
 Let’s quickly walk through the steps required to install Drill and run it
@@ -28,7 +25,7 @@ example is downloadable from [Yelp](http://www.yelp.com/dataset_challenge)
 
 [http://drill.apache.org/download/](http://drill.apache.org/download/)
 
-You can also [deploy Drill in clustered mode]({{ site.baseurl }}/docs/deploying-drill-in-a-cluster) if you
+You can also [in Drill in distributed mode]({{ site.baseurl }}/docs/installing-drill-in-distributed-mode) if you
 want to scale your environment.
 
 ### Step 2 : Open the Drill tar file
@@ -417,8 +414,8 @@ Stay tuned for more features and upcoming activities in the Drill community.
 
 To learn more about Drill, please refer to the following resources:
 
-  * Download Drill here:<http://getdrill.org/drill/download>
-  * 10 reasons we think Drill is cool:</docs/why-drill>
+  * Download Drill here: <http://getdrill.org/drill/download>
+  * [10 reasons we think Drill is cool]({{site.baseurl}}/docs/why-drill)
   * [A simple 10-minute tutorial]({{ site.baseurl }}/docs/drill-in-10-minutes>)
   * [More tutorials]({{ site.baseurl }}/docs/tutorials-introduction/)
 

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md b/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
index bb5680b..69cf93d 100644
--- a/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
+++ b/_docs/tutorials/050-analyzing-highly-dynamic-datasets.md
@@ -104,7 +104,7 @@ You can get value from the data quickly by applying both KVGEN and FLATTEN funct
 
 On the output of flattened data, you use standard SQL functionality such as filters , aggregates, and sort. Let’s see a few examples.
 
-## Get the total number of check-ins recorded in the Yelp dataset
+### Get the total number of check-ins recorded in the Yelp dataset
 
     0: jdbc:drill:zk=local> SELECT SUM(checkintbl.checkins.`value`) AS TotalCheckins FROM (
     . . . . . . . . . . . >  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl
@@ -115,7 +115,7 @@ On the output of flattened data, you use standard SQL functionality such as filt
     | 4713811       |
     +---------------+
 
-## Get the number of check-ins specifically for Sunday midnights
+### Get the number of check-ins specifically for Sunday midnights
 
     0: jdbc:drill:zk=local> SELECT SUM(checkintbl.checkins.`value`) AS SundayMidnightCheckins FROM (
     . . . . . . . . . . . >  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl WHERE checkintbl.checkins.key='23-0';
@@ -125,7 +125,7 @@ On the output of flattened data, you use standard SQL functionality such as filt
     | 8575                   |
     +------------------------+
 
-## Get the number of check-ins per day of the week
+### Get the number of check-ins per day of the week
 
     0: jdbc:drill:zk=local> SELECT `right`(checkintbl.checkins.key,1) WeekDay,sum(checkintbl.checkins.`value`) TotalCheckins from (
     . . . . . . . . . . . >  select flatten(kvgen(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json`  ) checkintbl GROUP BY `right`(checkintbl.checkins.key,1) ORDER BY TotalCheckins;
@@ -141,7 +141,7 @@ On the output of flattened data, you use standard SQL functionality such as filt
     | 5          | 937201        |
     +------------+---------------+
 
-## Get the number of check-ins per hour of the day
+### Get the number of check-ins per hour of the day
 
     0: jdbc:drill:zk=local> SELECT SUBSTR(checkintbl.checkins.key,1,strpos(checkintbl.checkins.key,'-')-1) AS HourOfTheDay ,SUM(checkintbl.checkins.`value`) TotalCheckins FROM (
     . . . . . . . . . . . >  SELECT FLATTEN(KVGEN(checkin_info)) checkins FROM dfs.`/users/nrentachintala/Downloads/yelp/yelp_academic_dataset_checkin.json` ) checkintbl GROUP BY SUBSTR(checkintbl.checkins.key,1,strpos(checkintbl.checkins.key,'-')-1) ORDER BY TotalCheckins;

http://git-wip-us.apache.org/repos/asf/drill/blob/5f6a51af/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
index bf1a136..cb679bf 100644
--- a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
+++ b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/020-getting-to-know-the-drill-sandbox.md
@@ -18,8 +18,6 @@ Drill includes a shell for connecting to relational databases and executing SQL
     sqlline version 1.1.6
     0: jdbc:drill:>
 
-[Starting SQLLine outside the sandbox]({{ site.baseurl }}/docs/install-drill) for use with Drill requires entering a few options, covered in the section, ["Install Drill"](docs/install-drill/). 
-
 In this tutorial you query a number of data sets, including Hive and HBase, and files on the file system, such as CSV, JSON, and Parquet files. To access these diverse data sources, you connect Drill to storage plugins. 
 
 ## Storage Plugin Overview


[28/31] drill git commit: duplicate win install fix

Posted by ts...@apache.org.
duplicate win install fix


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/e331872f
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/e331872f
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/e331872f

Branch: refs/heads/gh-pages
Commit: e331872fbc26171c3096e717fc17ae01cd83bf3b
Parents: 34c8b14
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Mon May 18 14:47:30 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Mon May 18 14:47:30 2015 -0700

----------------------------------------------------------------------
 _docs/getting-started/010-drill-introduction.md                    | 2 +-
 .../040-installing-drill-on-windows.md                             | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/e331872f/_docs/getting-started/010-drill-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/getting-started/010-drill-introduction.md b/_docs/getting-started/010-drill-introduction.md
index de01c2b..a71c861 100644
--- a/_docs/getting-started/010-drill-introduction.md
+++ b/_docs/getting-started/010-drill-introduction.md
@@ -13,7 +13,7 @@ with existing Apache Hive and Apache HBase deployments.
 
 Apache Drill 1.0 offers the following new features:
 
-* Many performance planning and execution [improvements](/docs/performance-tuning-introduction/), including a new text reader for faster join planning that complies with RFC 4180.
+* Many performance planning and execution [improvements](/docs/performance-tuning-introduction/).
 * Updated [Drill shell]({{site.baseurl}}/docs/configuring-the-drill-shell/#examples-of-configuring-the-drill-shell) and now formats query results having fewer than 70 characters in a column.
 * [Query audit logging]({{site.baseurl}}/docs/getting-query-information/) for getting the query history on a Drillbit.
 * Improved connection handling.

http://git-wip-us.apache.org/repos/asf/drill/blob/e331872f/_docs/install/installing-drill-in-embedded-mode/040-installing-drill-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/install/installing-drill-in-embedded-mode/040-installing-drill-on-windows.md b/_docs/install/installing-drill-in-embedded-mode/040-installing-drill-on-windows.md
index 05328c7..a8f82fa 100755
--- a/_docs/install/installing-drill-in-embedded-mode/040-installing-drill-on-windows.md
+++ b/_docs/install/installing-drill-in-embedded-mode/040-installing-drill-on-windows.md
@@ -5,7 +5,7 @@ parent: "Installing Drill in Embedded Mode"
 You can install Drill on Windows 7 or 8. First, check that you [meet the prerequisites]({{site.baseurl}}/docs/embedded-mode-prerequisites), including setting the JAVA_HOME environment variable, and then install Drill. Complete the following steps to install Drill:
 
 1. Click the following link to download the latest, stable version of Apache Drill:  [http://getdrill.org/drill/download/apache-drill-0.1.0.tar.gz](http://getdrill.org/drill/download/apache-drill-0.9.0.tar.gz)
-2. Move the `apache-drill-0.1.0.tar.gz` file to a directory where you want to install Drill.
+2. Move the `apache-drill-1.0.0.tar.gz` file to a directory where you want to install Drill.
 3. Unzip the `TAR.GZ` file using a third-party tool. If the tool you use does not unzip the TAR file as well as the `TAR.GZ` file, unzip the `apache-drill-0.1.0.tar` to extract the Drill software. The extraction process creates the installation directory named apache-drill-0.1.0 containing the Drill software. For example:
    ![drill install dir]({{ site.baseurl }}/docs/img/drill-directory.png)