Posted to commits@drill.apache.org by br...@apache.org on 2015/07/02 01:45:25 UTC

[1/2] drill git commit: fix links

Repository: drill
Updated Branches:
  refs/heads/gh-pages 3a9872370 -> 4be94a15d


fix links

remove angle brackets

fix formatting problem

fix format

fix file name

minor edit

DRILL-3246

Bridget's 1.1 updates


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/97a211ee
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/97a211ee
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/97a211ee

Branch: refs/heads/gh-pages
Commit: 97a211eeeb5b6e7956b867b5ae731f21d610941b
Parents: 3a98723
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Wed Jul 1 08:58:18 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Wed Jul 1 16:34:45 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 77 +++++++++++++++---
 _docs/110-troubleshooting.md                    | 12 +--
 ...ser-impersonation-with-hive-authorization.md | 83 +++++++++++++-------
 .../020-start-up-options.md                     | 16 ++--
 _docs/rn/070-0.9.0-rn.md                        | 29 +++++++
 .../sql-commands/005-supported-sql-commands.md  | 18 ++++-
 .../sql-commands/010-alter-session.md           |  2 +-
 .../sql-commands/020-alter-system.md            |  2 +-
 .../sql-commands/030-create-table-as.md         |  2 +-
 .../sql-commands/035-partition-by-clause.md     |  4 +-
 .../sql-commands/050-create-view.md             |  4 +-
 .../sql-commands/081-from-clause.md             | 32 ++++----
 .../sql-commands/089-with-clause.md             | 12 +--
 .../030-date-time-functions-and-arithmetic.md   |  2 +-
 .../010-sql-window-functions-introduction.md    | 17 ++--
 .../020-aggregate-window-functions.md           | 12 ++-
 .../030-ranking-window-functions.md             | 10 +--
 .../040-sql-window-functions-examples.md        |  4 +-
 18 files changed, 226 insertions(+), 112 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 819b1f2..8883f4c 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -326,7 +326,7 @@
             "parent": "Release Notes", 
             "previous_title": "Apache Drill 0.8.0 Release Notes", 
             "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-            "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
+            "relative_path": "_docs/rn/070-0.9.0-rn.md", 
             "title": "Apache Drill 0.9.0 Release Notes", 
             "url": "/docs/apache-drill-0-9-0-release-notes/"
         }, 
@@ -338,8 +338,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Sample Datasets", 
-            "next_url": "/docs/sample-datasets/", 
+            "next_title": "Apache Drill 1.1.0 Release Notes", 
+            "next_url": "/docs/apache-drill-1-1-0-release-notes/", 
             "parent": "Release Notes", 
             "previous_title": "Apache Drill 0.9.0 Release Notes", 
             "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
@@ -347,6 +347,23 @@
             "title": "Apache Drill 1.0.0 Release Notes", 
             "url": "/docs/apache-drill-1-0-0-release-notes/"
         }, 
+        "Apache Drill 1.1.0 Release Notes": {
+            "breadcrumbs": [
+                {
+                    "title": "Release Notes", 
+                    "url": "/docs/release-notes/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Sample Datasets", 
+            "next_url": "/docs/sample-datasets/", 
+            "parent": "Release Notes", 
+            "previous_title": "Apache Drill 1.0.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
+            "relative_path": "_docs/rn/090-1.1.0-rn.md", 
+            "title": "Apache Drill 1.1.0 Release Notes", 
+            "url": "/docs/apache-drill-1-1-0-release-notes/"
+        }, 
         "Apache Drill Contribution Guidelines": {
             "breadcrumbs": [
                 {
@@ -7607,7 +7624,7 @@
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-                    "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
+                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
                     "title": "Apache Drill 0.9.0 Release Notes", 
                     "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }, 
@@ -7619,14 +7636,31 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Sample Datasets", 
-                    "next_url": "/docs/sample-datasets/", 
+                    "next_title": "Apache Drill 1.1.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-1-1-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.9.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "relative_path": "_docs/rn/080-1.0.0-rn.md", 
                     "title": "Apache Drill 1.0.0 Release Notes", 
                     "url": "/docs/apache-drill-1-0-0-release-notes/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Release Notes", 
+                            "url": "/docs/release-notes/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Sample Datasets", 
+                    "next_url": "/docs/sample-datasets/", 
+                    "parent": "Release Notes", 
+                    "previous_title": "Apache Drill 1.0.0 Release Notes", 
+                    "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
+                    "relative_path": "_docs/rn/090-1.1.0-rn.md", 
+                    "title": "Apache Drill 1.1.0 Release Notes", 
+                    "url": "/docs/apache-drill-1-1-0-release-notes/"
                 }
             ], 
             "next_title": "Apache Drill 0.5.0 Release Notes", 
@@ -9907,8 +9941,8 @@
             "next_title": "AOL Search", 
             "next_url": "/docs/aol-search/", 
             "parent": "", 
-            "previous_title": "Apache Drill 1.0.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
+            "previous_title": "Apache Drill 1.1.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-1-1-0-release-notes/", 
             "relative_path": "_docs/140-sample-datasets.md", 
             "title": "Sample Datasets", 
             "url": "/docs/sample-datasets/"
@@ -15617,7 +15651,7 @@
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-                    "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
+                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
                     "title": "Apache Drill 0.9.0 Release Notes", 
                     "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }, 
@@ -15629,14 +15663,31 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Sample Datasets", 
-                    "next_url": "/docs/sample-datasets/", 
+                    "next_title": "Apache Drill 1.1.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-1-1-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.9.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "relative_path": "_docs/rn/080-1.0.0-rn.md", 
                     "title": "Apache Drill 1.0.0 Release Notes", 
                     "url": "/docs/apache-drill-1-0-0-release-notes/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Release Notes", 
+                            "url": "/docs/release-notes/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Sample Datasets", 
+                    "next_url": "/docs/sample-datasets/", 
+                    "parent": "Release Notes", 
+                    "previous_title": "Apache Drill 1.0.0 Release Notes", 
+                    "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
+                    "relative_path": "_docs/rn/090-1.1.0-rn.md", 
+                    "title": "Apache Drill 1.1.0 Release Notes", 
+                    "url": "/docs/apache-drill-1-1-0-release-notes/"
                 }
             ], 
             "next_title": "Apache Drill 0.5.0 Release Notes", 
@@ -15706,8 +15757,8 @@
             "next_title": "AOL Search", 
             "next_url": "/docs/aol-search/", 
             "parent": "", 
-            "previous_title": "Apache Drill 1.0.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
+            "previous_title": "Apache Drill 1.1.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-1-1-0-release-notes/", 
             "relative_path": "_docs/140-sample-datasets.md", 
             "title": "Sample Datasets", 
             "url": "/docs/sample-datasets/"

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/110-troubleshooting.md
----------------------------------------------------------------------
diff --git a/_docs/110-troubleshooting.md b/_docs/110-troubleshooting.md
index 00edd6f..7332e31 100755
--- a/_docs/110-troubleshooting.md
+++ b/_docs/110-troubleshooting.md
@@ -32,26 +32,26 @@ If you have any of the following problems, try the suggested solution:
 * [Query Parsing Errors]({{site.baseurl}}/docs/troubleshooting/#query-parsing-errors)
 * [Query Parsing Errors Caused by Reserved Words]({{site.baseurl}}/docs/troubleshooting/#query-parsing-errors-caused-by-reserved-words)
 * [Table Not Found]({{site.baseurl}}/docs/troubleshooting/#table-not-found)
-* [Access Nested Fields without Table Name/Alias]({{site.baseurl}}/docs/troubleshooting/#access-nested-fields-without-table-name/alias)
+* [Access Nested Fields without Table Name/Alias]({{site.baseurl}}/docs/troubleshooting/#access-nested-fields-without-table-name-alias)
 * [Unexpected Null Values for Columns in Results]({{site.baseurl}}/docs/troubleshooting/#unexpected-null-values-for-columns-in-results)
 * [Using Functions with Incorrect Data Types]({{site.baseurl}}/docs/troubleshooting/#using-functions-with-incorrect-data-types)
 * [Query Takes a Long Time to Return]({{site.baseurl}}/docs/troubleshooting/#query-takes-a-long-time-to-return)
 * [Schema Changes]({{site.baseurl}}/docs/troubleshooting/#schema-changes)
 * [Timestamps and Timezones Other Than UTC]({{site.baseurl}}/docs/troubleshooting/#timestamps-and-timezones-other-than-utc)
 * [Unexpected ODBC Issues]({{site.baseurl}}/docs/troubleshooting/#unexpected-odbc-issues)
-* [JDBC/ODBC Connection Issues with ZooKeeper]({{site.baseurl}}/docs/troubleshooting/#jdbc/odbc-connection-issues-with-zookeeper)
+* [JDBC/ODBC Connection Issues with ZooKeeper]({{site.baseurl}}/docs/troubleshooting/#jdbc-odbc-connection-issues-with-zookeeper)
 * [Metadata Queries Take a Long Time to Return]({{site.baseurl}}/docs/troubleshooting/#metadata-queries-take-a-long-time-to-return)
 * [Unexpected Results Due to Implicit Casting]({{site.baseurl}}/docs/troubleshooting/#unexpected-results-due-to-implicit-casting)
 * [Column Alias Causes an Error]({{site.baseurl}}/docs/troubleshooting/#column-alias-causes-an-error)
-* [List (Array) Contains Null]({{site.baseurl}}/docs/troubleshooting/#list-(array)-contains-null)
-* [SELECT COUNT (*) Takes a Long Time to Run]({{site.baseurl}}/docs/troubleshooting/#select-count-(*)-takes-a-long-time-to-run)
+* [List (Array) Contains Null]({{site.baseurl}}/docs/troubleshooting/#list-array-contains-null)
+* [SELECT COUNT (*) Takes a Long Time to Run]({{site.baseurl}}/docs/troubleshooting/#select-count-takes-a-long-time-to-run)
 * [Tableau Issues]({{site.baseurl}}/docs/troubleshooting/#tableau-issues)
 * [GROUP BY Using Alias]({{site.baseurl}}/docs/troubleshooting/#group-by-using-alias)
 * [Casting a VARCHAR String to an INTEGER Causes an Error]({{site.baseurl}}/docs/troubleshooting/#casting-a-varchar-string-to-an-integer-causes-an-error)
 * [Unexpected Exception during Fragment Initialization]({{site.baseurl}}/docs/troubleshooting/#unexpected-exception-during-fragment-initialization)
 * [Queries Running Out of Memory]({{site.baseurl}}/docs/troubleshooting/#queries-running-out-of-memory)
 * [Unclear Error Message]({{site.baseurl}}/docs/troubleshooting/#unclear-error-message)
-* [SQLLine Error Starting Drill in Embedded Mode]({{site.baseurl}}/docs/troubleshooting/#sqlline-error-starting-drill-in-embedded-mode)
+* [Error Starting Drill in Embedded Mode]({{site.baseurl}}/docs/troubleshooting/#error-starting-drill-in-embedded-mode)
 
 ### Memory Issues
 Symptom: Memory problems occur when you run certain queries, such as those that perform window functions.
@@ -263,7 +263,7 @@ Solution: Turn on verbose errors.
 
 Determine your currently connected drillbit using `SELECT * FROM sys.drillbits`. Then review the Drill logs from that drillbit.
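
 As a hedged illustration only (assuming the `exec.errors.verbose` session option and the `sys.drillbits` table are available in your Drill version), the workflow might look like this:

     -- Enable verbose error messages for the current session.
     ALTER SESSION SET `exec.errors.verbose` = true;
     -- List the drillbits in the cluster to identify the one you are connected to.
     SELECT * FROM sys.drillbits;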
 
-### SQLLine Error Starting Drill in Embedded Mode
+### Error Starting Drill in Embedded Mode
 
 Symptom:  
 

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/configure-drill/076-configuring-user-impersonation-with-hive-authorization.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/076-configuring-user-impersonation-with-hive-authorization.md b/_docs/configure-drill/076-configuring-user-impersonation-with-hive-authorization.md
index d69cb9f..86a799b 100644
--- a/_docs/configure-drill/076-configuring-user-impersonation-with-hive-authorization.md
+++ b/_docs/configure-drill/076-configuring-user-impersonation-with-hive-authorization.md
@@ -19,14 +19,14 @@ For more information, see [Storage Based Authorization in the Metastore Server](
 
 ## SQL Standard Based Authorization  
 
-You can configure Hive SQL standard based authorization in Hive version 1.0 to work with Drill impersonation. The SQL standard based authorization model can control which users have access to columns, rows, and views. Users with the appropriate permissions can issue the GRANT and REVOKE statements to manage privileges from Hive.
+You can configure Hive SQL standard based authorization in Hive version 1.0 to work with impersonation in Drill 1.1. The SQL standard based authorization model can control which users have access to columns, rows, and views. Users with the appropriate permissions can issue the GRANT and REVOKE statements to manage privileges from Hive.
 
 For more information, see [SQL Standard Based Hive Authorization](https://cwiki.apache.org/confluence/display/HELIX/SQL+Standard+Based+Hive+Authorization).  
 
 
 ## Configuration  
 
-Once you determine which type of Hive authorization model that you want to use, enable impersonation in Drill. Update hive-site.xml with the relevant parameters for the type of authorization that you have decided to implement. Modify the Hive storage plugin instance in Drill with the relevant settings for the type of authorization.  
+Once you determine the Hive authorization model that you want to implement, enable impersonation in Drill. Update hive-site.xml with the relevant parameters for the authorization type. Modify the Hive storage plugin instance in Drill with the relevant settings for the authorization type.  
 
 ### Prerequisites  
 
@@ -46,7 +46,7 @@ Complete the following steps on each Drillbit node to enable user impersonation,
                  max_chained_user_hops: 3
           }
 
-3. Verify that enabled is set to `‘true’`.
+3. Verify that enabled is set to `"true"`.
 4. Set the maximum number of chained user hops that you want Drill to allow.
 5. (MapR clusters only) Add the following lines to the `drill-env.sh` file:
    * If the underlying file system is not secure, add the following line:
@@ -70,18 +70,33 @@ Update hive-site.xml with the parameters specific to the type of authorization t
 
 ### Storage Based Authorization  
 
-Add the required parameters to the hive-site.xml file to configure storage based authentication.  
+Add the following required authorization parameters to hive-site.xml to configure storage based authorization:  
+
+**hive.metastore.pre.event.listeners**  
+**Description:** Turns on metastore-side security.  
+**Value:** org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener  
+
+**hive.security.metastore.authorization.manager**  
+**Description:** Tells Hive which metastore-side authorization provider to use. The default setting uses DefaultHiveMetastoreAuthorizationProvider, which implements the standard Hive grant/revoke model. To use an HDFS permission-based model (recommended) for authorization, use StorageBasedAuthorizationProvider.  
+**Value:** org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider  
+
+**hive.security.metastore.authenticator.manager**  
+**Description:** The authenticator manager class name in the metastore for authentication.  
+**Value:** org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator  
+
+**hive.security.metastore.authorization.auth.reads**  
+**Description:** Tells the Hive metastore to perform authorization checks for read access.  
+**Value:** true  
+
+**hive.metastore.execute.setugi**  
+**Description:** Causes the metastore to execute file system operations using the client's reported user and group permissions. You must set this property on both the client and server sides. If the client is set to true and the server is set to false, the client setting is ignored.  
+**Value:** true 
+
+**hive.server2.enable.doAs**  
+**Description:** Tells HiveServer2 to execute Hive operations as the user making the calls. 
+**Value:** true 
 
-The following table lists the required authorization parameters and their descriptions:  
 
-| Property                                         | Value                                                                                      | Description                                                                                                                                                                                                                                                                                                    |
-|--------------------------------------------------|--------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| hive.metastore.pre.event.listeners               | Set to: org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener     | Turns on metastore-side security.                                                                                                                                                                                                                                                                              |
-| hive.security.metastore.authorization.manager    | Set to: org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider | Tells Hive which metastore-side authorization provider to use. The default setting uses DefaultHiveMetastoreAuthorizationProvider, which implements the standard Hive grant/revoke model. To use an HDFS permission-based model (recommended) to do your authorization, use StorageBasedAuthorizationProvider. |
-| hive.security.metastore.authenticator.manager    | Set to:org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator              | The authenticator manager class name in the metastore for authentication.                                                                                                                                                                                                                                      |
-| hive.security.metastore.authorization.auth.reads | Set to “true”.                                                                             | Tells Hive metastore authorization checks for read access.                                                                                                                                                                                                                                                     |
-| hive.metastore.execute.setugi                    | Set to “true”.                                                                             | Causes the metastore to execute file system operations using the client's reported user and group permissions. You must set this property on both the client and server sides. If client sets it to true and server sets it to false, the client setting is ignored.                                           |
-| hive.server2.enable.doAs                         | Set to “true”.                                                                             | Tells HiveServer2 to execute Hive operations as the user making the calls.                                                                                                                                                                                                                                     |  
 
 ### Example hive-site.xml Settings for Storage Based Authorization  
 
@@ -117,18 +132,31 @@ The following table lists the required authorization parameters and their descri
 
 ## SQL Standard Based Authorization  
 
-Add the required parameters to the hive-site.xml file to configure SQL standard based authorization.  
+Add the following required authorization parameters to hive-site.xml to configure SQL standard based authorization:  
+
+**hive.security.authorization.enabled**  
+**Description:** Enables/disables Hive security authorization.   
+**Value:** true 
 
-The following table lists the required authorization parameters and their descriptions:  
+**hive.security.authenticator.manager**  
+**Description:** Class that implements HiveAuthenticationProvider to provide the client’s username and groups.  
+**Value:** org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator  
 
-| Property                            | Value                                                                                             | Description                                                                                                                                                                                                                                                                                                                  |
-|-------------------------------------|---------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| hive.security.authorization.enabled | Set to true.                                                                                      | Enables/disables Hive security authorization.                                                                                                                                                                                                                                                                                |
-| hive.security.authenticator.manager | Set to:org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator                           | Class that implements HiveAuthenticationProvider to provide the client’s username and groups.                                                                                                                                                                                                                                |
-| hive.security.authorization.manager | Set to:org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory | The Hive client authorization manager class name.                                                                                                                                                                                                                                                                            |
-| hive.server2.enable.doAs            | Set to false.                                                                                     | Tells HiveServer2 to execute Hive operations as the user making the calls.                                                                                                                                                                                                                                                   |
-| hive.users.in.admin.role            | Set to the list of comma-separated users who need to be added to the admin role.                  | A comma separated list of users which gets added to the ADMIN role when the metastore starts up. You can add more uses at any time. Note that a user who belongs to the admin role needs to run the "set role" command before getting the privileges of the admin role, as this role is not in the current roles by default. |
-| hive.metastore.execute.setugi       | Set to false.                                                                                     | Causes the metastore to execute file system operations using the client's reported user and group permissions. You must set this property on both the client and server sides. If client sets it to true and server sets it to false, the client setting is ignored.                                                         |  
+**hive.security.authorization.manager**  
+**Description:** The Hive client authorization manager class name.   
+**Value:** org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory  
+
+**hive.server2.enable.doAs**  
+**Description:** Tells HiveServer2 to execute Hive operations as the user making the calls.   
+**Value:** false  
+
+**hive.users.in.admin.role**  
+**Description:** A comma-separated list of users who are added to the ADMIN role when the metastore starts up. You can add more users at any time. Note that a user who belongs to the admin role needs to run the "set role" command before getting the privileges of the admin role, as this role is not in the current roles by default.  
+**Value:** Set to the list of comma-separated users who need to be added to the admin role. 
+
+**hive.metastore.execute.setugi**  
+**Description:** Causes the metastore to execute file system operations using the client's reported user and group permissions. You must set this property on both the client and server side. If the client is set to true and the server is set to false, the client setting is ignored.  
+**Value:** false 
 
 ### Example hive-site.xml Settings for SQL Standard Based Authorization   
 
@@ -170,8 +198,8 @@ Note: The metastore host port for MapR is typically 9083.
 
 Complete the following steps to modify the Hive storage plugin:  
 
-1.  Navigate to http://<drillbit_hostname>:8047, and select the Storage tab.  
-2.  Click Update next to the hive instance.  
+1.  Navigate to `http://<drillbit_hostname>:8047`, and select the **Storage** tab.  
+2.  Click **Update** next to the hive instance.  
 3.  In the configuration window, add the configuration settings for the authorization type.  
        * For storage based authorization, add the following settings:  
 
@@ -185,10 +213,11 @@ Complete the following steps to modify the Hive storage plugin:
                  "hive.server2.enable.doAs" : "true",
                  "hive.metastore.execute.setugi" : "true"
                }
-              }  
-
+              }  
+
        * For SQL standard based authorization, add the following settings:  
 
+
               {
                type:"hive",
                enabled: true,

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/configure-drill/configuration-options/020-start-up-options.md
----------------------------------------------------------------------
diff --git a/_docs/configure-drill/configuration-options/020-start-up-options.md b/_docs/configure-drill/configuration-options/020-start-up-options.md
index 2f1ee5d..7149f81 100644
--- a/_docs/configure-drill/configuration-options/020-start-up-options.md
+++ b/_docs/configure-drill/configuration-options/020-start-up-options.md
@@ -46,17 +46,11 @@ override.conf` file located in Drill’s` /conf` directory.
 The summary of start-up options, also known as boot options, lists default values. The following descriptions provide more detail on key options that are frequently reconfigured:
 
 * drill.exec.sys.store.provider.class  
-  
-  Defines the persistent storage (PStore) provider. The [PStore]({{ site.baseurl }}/docs/persistent-configuration-storage) holds configuration and profile data. 
-
-* drill.exec.buffer.size
-
-  Defines the amount of memory available, in terms of record batches, to hold data on the downstream side of an operation. Drill pushes data downstream as quickly as possible to make data immediately available. This requires Drill to use memory to hold the data pending operations. When data on a downstream operation is required, that data is immediately available so Drill does not have to go over the network to process it. Providing more memory to this option increases the speed at which Drill completes a query.
-
-* drill.exec.sort.external.spill.directories
-
-  Tells Drill which directory to use when spooling. Drill uses a spool and sort operation for beyond memory operations. The sorting operation is designed to spool to a Hadoop file system. The default Hadoop file system is a local file system in the /tmp directory. Spooling performance (both writing and reading back from it) is constrained by the file system. For MapR clusters, use MapReduce volumes or set up local volumes to use for spooling purposes. Volumes improve performance and stripe data across as many disks as possible.
-
+  Defines the persistent storage (PStore) provider. The [PStore]({{ site.baseurl }}/docs/persistent-configuration-storage) holds configuration and profile data.  
+* drill.exec.buffer.size  
+  Defines the amount of memory available, in terms of record batches, to hold data on the downstream side of an operation. Drill pushes data downstream as quickly as possible to make data immediately available. This requires Drill to use memory to hold the data pending operations. When data on a downstream operation is required, that data is immediately available so Drill does not have to go over the network to process it. Providing more memory to this option increases the speed at which Drill completes a query.  
+* drill.exec.sort.external.spill.directories  
+  Tells Drill which directory to use when spooling. Drill uses a spool and sort operation for operations that exceed available memory. The sorting operation is designed to spool to a Hadoop file system. The default Hadoop file system is a local file system in the `/tmp` directory. Spooling performance (both writing and reading back from it) is constrained by the file system. For MapR clusters, use MapReduce volumes or set up local volumes to use for spooling purposes. Volumes improve performance and stripe data across as many disks as possible.  
 * drill.exec.zk.connect  
   Provides Drill with the ZooKeeper quorum to use to connect to data sources. Change this setting to point to the ZooKeeper quorum that you want Drill to use. You must configure this option on each Drillbit node.
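
 One hedged way to inspect the boot option values actually in effect is to query the `sys.boot` system table (assuming it is available in your Drill version):

     -- Show all boot (start-up) options and their current values.
     SELECT * FROM sys.boot;
     -- If the table exposes a name column in your version, you can filter it,
     -- for example: SELECT * FROM sys.boot WHERE name LIKE '%zk%';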
 

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/rn/070-0.9.0-rn.md
----------------------------------------------------------------------
diff --git a/_docs/rn/070-0.9.0-rn.md b/_docs/rn/070-0.9.0-rn.md
new file mode 100755
index 0000000..edae5c8
--- /dev/null
+++ b/_docs/rn/070-0.9.0-rn.md
@@ -0,0 +1,29 @@
+---
+title: "Apache Drill 0.9.0 Release Notes"
+parent: "Release Notes"
+---
+It has been about a month since the release of Drill 0.8, which included [more than 240 improvements]({{ site.baseurl }}/blog/drill-0.8-released/). Today we're happy to announce the availability of Drill 0.9, providing additional enhancements and bug fixes. In fact, this release includes [200 resolved JIRAs](https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12328813). Some of the noteworthy features in Drill 0.9 are:
+
+* **Authentication** ([DRILL-2674](https://issues.apache.org/jira/browse/DRILL-2674)). Drill now supports username/password authentication through the Java and C++ clients, as well as JDBC and ODBC. On the server-side, Drill leverages Linux PAM to securely validate the credentials. Users can choose to use an external user directory such as Active Directory or LDAP. To enable authentication, set the `security.user.auth` option in `drill-override.conf`.
+* **Impersonation** ([DRILL-2363](https://issues.apache.org/jira/browse/DRILL-2363)). Queries now execute and access resources using the identity of the user who submitted the query. Previously, all queries would run as the same user (e.g., `drill`). With the new impersonation capability, the query will fail if the submitting user does not have permission to read the requested file(s) in the distributed file system. To enable impersonation, set the `drill.exec.impersonation` option in `drill-override.conf`.
+* **Ownership chaining**. Drill now allows views with different owners to be chained. This represents a very flexible access control solution. For example, an administrator with access to raw, sensitive data could create a view called `masked` which would expose only a subset of the data to other users. The administrator would enable users to read the `masked` view but not the raw data. Note that Drill provides an option `max_chained_user_hops` that specifies how many ownership changes are allowed in a chain, thereby providing administrators (or data stewards) more control over sharing of data.
+* **MongoDB authentication** ([DRILL-1502](https://issues.apache.org/jira/browse/DRILL-1502)). Drill can now connect to a MongoDB cluster that requires authentication.
+* **Extended JSON datatypes**. Our friends at MongoDB invented [extended JSON](http://docs.mongodb.org/manual/reference/mongodb-extended-json/) - a set of extensions to the JSON format for supporting additional data types. We decided to embrace extended JSON in Drill. For example, standard JSON doesn't have a time type, so a time could be represented as either a string or a number: `{"foo": "19:20:30.450Z"}` is just a string. With extended JSON, the `$time` qualifier can be used to specify that `foo` is a time `{"foo": {"$time": "19:20:30.450Z"}}`.
+  We now support a number of qualifiers including `$bin`, `$date`, `$time`, `$interval`, `$numberLong` and `$dateDay` (see [the example](https://github.com/apache/drill/blob/master/exec/java-exec/src/test/resources/vector/complex/extended.json)). We're in the process of adding some additional qualifiers to make sure that all of MongoDB's extended types are supported (this is particularly important when querying data in MongoDB).
+* **Avro support** ([DRILL-1512](https://issues.apache.org/jira/browse/DRILL-1512)). Drill can now read Avro files. This patch was contributed by Andrew Selden at Elastic.co (formerly known as Elasticsearch).
+* **Improved error messages** ([DRILL-2675](https://issues.apache.org/jira/browse/DRILL-2675) and more). It can be challenging for a complex distributed system like Drill to translate low-level internal conditions into actionable messages to the user. This release includes several enhancements that enable Drill to accomplish just that in a variety of cases.
+* **Parquet and Calcite enhancements** ([DRILL-1410](https://issues.apache.org/jira/browse/DRILL-1410) and [DRILL-1384](https://issues.apache.org/jira/browse/DRILL-1384)). Drill isn't a traditional query engine - it's the first analytical query engine with a JSON data model. This has required us to enhance Parquet (our columnar format) and Calcite (our SQL parser). These enhancements have now been contributed back to those projects, and Drill is using the latest versions which include these enhancements.
+* **New sys tables for memory and thread information** ([DRILL-2275](https://issues.apache.org/jira/browse/DRILL-2275)). Drill includes two new `sys` tables that provide real-time metrics about memory utilization and threads on each of the nodes in the cluster. You can run a simple `SELECT *` to see what information is available:
+
+    ```sql
+    SELECT * FROM sys.drillmemory;
+    SELECT * FROM sys.drillbitthreads;
+    ```
+
+* **Support for very wide tables** ([DRILL-2739](https://issues.apache.org/jira/browse/DRILL-2739)). Drill previously had some issues with tables that had more than 4095 columns. This limitation has been addressed.
+
+You can now [download Drill 0.9]({{ site.baseurl }}/download/). As always, you can check out the official [release notes]({{ site.baseurl }}/docs/release-notes/) for more details.
+
+We're gearing up for Drill's 1.0 release later this month. Stay tuned!
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/005-supported-sql-commands.md b/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
index df604cb..66db086 100644
--- a/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
+++ b/_docs/sql-reference/sql-commands/005-supported-sql-commands.md
@@ -5,5 +5,19 @@ parent: "SQL Commands"
 The following table provides a list of the SQL commands that Drill supports,
 with their descriptions and example syntax:
 
-<table style='table-layout:fixed;width:100%'>
-    <tr><th >Command</th><th >Description</th><th >Syntax</th></tr><tr><td valign="top" width="15%"><a href="/docs/alter-session">ALTER SESSION</a></td><td valign="top" width="60%">Changes a system setting for the duration of a session. A session ends when you quit the Drill shell. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top"><pre>ALTER SESSION SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/alter-system">ALTER SYSTEM</a></td><td valign="top" >Permanently changes a system setting. The new settings persist across all sessions. For a list of Drill options and their descriptions, refer to <a href="/docs/planning-and-execution-options">Planning and Execution Options</a>.</td><td valign="top" ><pre>ALTER SYSTEM SET `&lt;option_name&gt;`=&lt;value&gt;;</pre></td></tr><tr><td valign="top" ><p><a href="/docs/create-table-as--cta
 s">CREATE TABLE AS<br />(CTAS)</a></p></td><td valign="top" >Creates a new table and populates the new table with rows returned from a SELECT query. Use the CREATE TABLE AS (CTAS) statement in place of INSERT INTO. When you issue the CTAS command, you create a directory that contains parquet or CSV files. Each workspace in a file system has a default file type.<br />You can specify which writer you want Drill to use when creating a table: parquet, CSV, or JSON (as specified with the <code>store.format</code> option).</td><td valign="top" ><pre class="programlisting">CREATE TABLE new_table_name AS &lt;query&gt;;</pre></td></tr><tr><td - valign="top" ><a href="/docs/create-view">CREATE VIEW </a></td><td - valign="top" >Creates a virtual structure for the result set of a stored query.-</td><td -valign="top" ><pre>CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS &lt;query&gt;;</pre></td></tr><tr><td  valign="top" ><a href="/docs/describe">DESCRIBE</a></td><td 
  valign="top" >Returns information about columns in a table or view.</td><td valign="top" ><pre>DESCRIBE [workspace.]table_name|view_name</pre></td></tr><tr><td valign="top" ><a href="/docs/drop-view">DROP VIEW</a></td><td valign="top" >Removes a view.</td><td valign="top" ><pre>DROP VIEW [workspace.]view_name ;</pre></td></tr><tr><td  valign="top" ><a href="/docs/explain">EXPLAIN PLAN FOR</a></td><td valign="top" >Returns the physical plan for a particular query.</td><td valign="top" ><pre>EXPLAIN PLAN FOR &lt;query&gt;;</pre></td></tr><tr><td valign="top" ><a href="/docs/explain">EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR</a></td><td valign="top" >Returns the logical plan for a particular query.</td><td  valign="top" ><pre>EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR &lt;query&gt;;</pre></td></tr><tr><td colspan="1" valign="top" ><a href="/docs/select" rel="nofollow">SELECT</a></td><td valign="top" >Retrieves data from tables and files.</td><td  valign="top" ><pre>[WITH subquery]<br />SEL
 ECT column_list FROM table_name <br />[WHERE clause]<br />[GROUP BY clause]<br />[HAVING clause]<br />[ORDER BY clause];</pre></td></tr><tr><td  valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW DATABASES </a></td><td valign="top" >Returns a list of available schemas. Equivalent to SHOW SCHEMAS.</td><td valign="top" ><pre>SHOW DATABASES;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-files" >SHOW FILES</a></td><td valign="top" >Returns a list of files in a file system schema.</td><td valign="top" ><pre>SHOW FILES IN filesystem.`schema_name`;<br />SHOW FILES FROM filesystem.`schema_name`;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-databases-and-show-schemas">SHOW SCHEMAS</a></td><td - valign="top" >Returns a list of available schemas. Equivalent to SHOW DATABASES.</td><td valign="top" ><pre>SHOW SCHEMAS;</pre></td></tr><tr><td valign="top" ><a href="/docs/show-tables">SHOW TABLES</a></td><td valign="top" >Returns a list of tables and views.</t
 d><td valign="top" ><pre>SHOW TABLES;</pre></td></tr><tr><td valign="top" ><a href="/docs/use">USE</a></td><td valign="top" >Change to a particular schema. When you opt to use a particular schema, Drill issues queries on that schema only.</td><td valign="top" ><pre>USE schema_name;</pre></td></tr></table>
+| Command                                                                  | Description                                                                                                                                                                                        | Syntax                                                                                                             |
+|--------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------|
+| [ALTER SESSION]({{site.baseurl}}/docs/alter-session)                     | Changes a system setting for the duration of a session. A session ends when you quit the Drill shell. For a list of Drill options and their descriptions, refer to Planning and Execution Options. | ALTER SESSION SET \`option_name` = value;                                                                          |
+| [ALTER SYSTEM]({{site.baseurl}}/docs/alter-system)                       | Permanently changes a system setting. The new settings persist across all sessions. For a list of Drill options and their descriptions, refer to Planning and Execution Options.                   | ALTER SYSTEM SET \`option_name`=value;                                                                             |
+| [CREATE TABLE AS(CTAS) ]({{site.baseurl}}/docs/create-table-as--ctas)    | Creates a new table and populates the new table with rows returned from a SELECT query. Use the CREATE TABLE AS (CTAS) statement in place of INSERT INTO.                                          | CREATE TABLE name AS query;                                                                                        |
+| [CREATE VIEW]({{site.baseurl}}/docs/create-view)                         | Creates a virtual structure for the result set of a stored query.                                                                                                                                  | CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS query;                                 |
+| [DESCRIBE ]({{site.baseurl}}/docs/describe)                              | Returns information about columns in a table or view.                                                                                                                                              | DESCRIBE [workspace.]table_name                                                                                    |
+| [DROP VIEW]({{site.baseurl}}/docs/drop-view)                             | Removes a view.                                                                                                                                                                                    | DROP VIEW [workspace.]view_name ;                                                                                  |
+| [EXPLAIN PLAN FOR]({{site.baseurl}}/docs/explain)                        | Returns the physical plan for a particular query.                                                                                                                                                  | EXPLAIN PLAN FOR query;                                                                                            |
+| [EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR]({{site.baseurl}}/docs/explain) | Returns the logical plan for a particular query.                                                                                                                                                   | EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR query;                                                                     |
+| [SELECT]({{site.baseurl}}/docs/select)                                   | Retrieves data from tables and files.                                                                                                                                                              | [WITH subquery] SELECT column_list FROM table_name [WHERE clause] [GROUP BY clause] [HAVING clause] [ORDER BY clause]; |
+| [SHOW DATABASES]({{site.baseurl}}/docs/show-databases-and-show-schemas)  | Returns a list of available schemas. Equivalent to SHOW SCHEMAS.                                                                                                                                   | SHOW DATABASES;                                                                                                    |
+| [SHOW FILES]({{site.baseurl}}/docs/show-files)                           | Returns a list of files in a file system schema.                                                                                                                                                   | SHOW FILES IN&#124;FROM filesystem.\`schema_name`;                                                                 |
+| [SHOW SCHEMAS]({{site.baseurl}}/docs/show-databases-and-show-schemas)    | Returns a list of available schemas. Equivalent to SHOW DATABASES.                                                                                                                                 | SHOW SCHEMAS;                                                                                                      |
+| [SHOW TABLES]({{site.baseurl}}/docs/show-tables)                         | Returns a list of tables and views.                                                                                                                                                                | SHOW TABLES;                                                                                                       |
+| [USE]({{site.baseurl}}/docs/use)                                         | Change to a particular schema. When you opt to use a particular schema, Drill issues queries on that schema only.                                                                                  | USE schema_name;                                                                                                   |
\ No newline at end of file
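
A short illustrative session that strings a few of these commands together (the `dfs.tmp` workspace and the `my_view` name are assumptions; substitute objects from your own configuration):

    -- Switch to a writable workspace (dfs.tmp is assumed to exist).
    USE dfs.tmp;
    -- List the tables and views in the current schema.
    SHOW TABLES;
    -- Inspect the columns of an existing view or table (my_view is a placeholder).
    DESCRIBE my_view;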

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/010-alter-session.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/010-alter-session.md b/_docs/sql-reference/sql-commands/010-alter-session.md
index 5173b57..588688e 100644
--- a/_docs/sql-reference/sql-commands/010-alter-session.md
+++ b/_docs/sql-reference/sql-commands/010-alter-session.md
@@ -9,7 +9,7 @@ session. Session level settings override system level settings.
 
 The ALTER SESSION command supports the following syntax:
 
-    ALTER SESSION SET `<option_name>`=<value>;
+    ALTER SESSION SET `option_name` = value;
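
 For example, a hedged illustration using the `store.format` option (verify the option name against your planning and execution options list):

     -- Write CTAS output as JSON for the remainder of this session.
     ALTER SESSION SET `store.format` = 'json';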
 
 ## Parameters
 

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/020-alter-system.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/020-alter-system.md b/_docs/sql-reference/sql-commands/020-alter-system.md
index cc7a9f1..573f2bd 100644
--- a/_docs/sql-reference/sql-commands/020-alter-system.md
+++ b/_docs/sql-reference/sql-commands/020-alter-system.md
@@ -10,7 +10,7 @@ settings.
 
 The ALTER SYSTEM command supports the following syntax:
 
-    ALTER SYSTEM SET `<option_name>`=<value>;
+    ALTER SYSTEM SET `option_name` = value;
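
 For example, a hedged illustration (the `planner.width.max_per_node` option name is an assumption to verify against your system options):

     -- Permanently cap query parallelization per node across all sessions.
     ALTER SYSTEM SET `planner.width.max_per_node` = 3;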
 
 ## Parameters
 

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/030-create-table-as.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/030-create-table-as.md b/_docs/sql-reference/sql-commands/030-create-table-as.md
index c5425fd..e742eb1 100644
--- a/_docs/sql-reference/sql-commands/030-create-table-as.md
+++ b/_docs/sql-reference/sql-commands/030-create-table-as.md
@@ -6,7 +6,7 @@ You can create tables in Drill by using the CTAS command.
 
 ## Syntax
 
-    CREATE TABLE <name> [ (column list) ] AS <query>;
+    CREATE TABLE name [ (column list) ] AS query;
 
 *name* is a unique directory name, optionally prefaced by a storage plugin name, such as dfs, and a workspace, such as tmp using [dot notation]({{site.baseurl}}/docs/workspaces).  
 *column list* is an optional list of column names or aliases in the new table.  
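
 A minimal CTAS sketch, assuming the employee.json sample on Drill's classpath (cp) and a writable dfs.tmp workspace are available in your environment:

     -- Create a table from a subset of the classpath sample data.
     CREATE TABLE dfs.tmp.emp_summary (emp_id, full_name) AS
       SELECT employee_id, full_name
       FROM cp.`employee.json`
       WHERE salary > 30000;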

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/035-partition-by-clause.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/035-partition-by-clause.md b/_docs/sql-reference/sql-commands/035-partition-by-clause.md
index 319291a..e472f0f 100644
--- a/_docs/sql-reference/sql-commands/035-partition-by-clause.md
+++ b/_docs/sql-reference/sql-commands/035-partition-by-clause.md
@@ -16,7 +16,7 @@ When the base table in the SELECT statement is schema-less, include columns in t
 
     CREATE TABLE dest_name [ (column, . . .) ]
     [ PARTITION_BY (column, . . .) ] 
-    AS SELECT <column_list> FROM <source_name>;
+    AS SELECT column_list FROM source_name;
 
 When columns in the source table have ambiguous names, such as COLUMNS[0], define one or more column aliases in the SELECT statement. Use the alias name or names in the CREATE TABLE list. List aliases in the same order as the corresponding columns in the SELECT statement. Matching order is important because Drill performs an overwrite operation.  
 
@@ -28,7 +28,7 @@ For example:
 
     CREATE TABLE by_yr (yr, ngram, occurrances) PARTITION BY (yr) AS SELECT columns[1] yr, columns[0] ngram, columns[2] occurrances FROM `googlebooks-eng-all-5gram-20120701-zo.tsv`;
 
-When the partition column is resolved to * column in a schema-less query, the * column cannot be a result of join operation. 
+When the partition column resolves to the * column in a schema-less query (that is, a SELECT * query), the * column cannot be the result of a join operation. 
 
 The output of CTAS using a PARTITION BY clause creates separate files. Each file contains one partition value, and Drill can create multiple files for the same partition value.
 

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/050-create-view.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/050-create-view.md b/_docs/sql-reference/sql-commands/050-create-view.md
index 4d71148..ceb8bac 100644
--- a/_docs/sql-reference/sql-commands/050-create-view.md
+++ b/_docs/sql-reference/sql-commands/050-create-view.md
@@ -14,13 +14,13 @@ existing views or any other available storage plugin data sources.
 
 The CREATE VIEW command supports the following syntax:
 
-    CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS <query>;
+    CREATE [OR REPLACE] VIEW [workspace.]view_name [ (column_name [, ...]) ] AS query;
 
 Use CREATE VIEW to create a new view. Use CREATE OR REPLACE VIEW to replace an
 existing view with the same name. When you replace a view, the query must
 generate the same set of columns with the same column names and data types.
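
 A hedged example, again assuming the classpath employee.json sample and a writable dfs.tmp workspace:

     -- Create or replace a view that exposes only non-sensitive columns.
     CREATE OR REPLACE VIEW dfs.tmp.emp_public (emp_id, full_name) AS
       SELECT employee_id, full_name
       FROM cp.`employee.json`;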
 
-**Note:** Follow Drill’s rules for identifiers when you name the view. See coming soon...
+**Note:** Follow Drill’s rules for identifiers when you name the view. 
 
 ## Parameters
 

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/081-from-clause.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/081-from-clause.md b/_docs/sql-reference/sql-commands/081-from-clause.md
index 8e0cec9..a8f7f46 100755
--- a/_docs/sql-reference/sql-commands/081-from-clause.md
+++ b/_docs/sql-reference/sql-commands/081-from-clause.md
@@ -21,16 +21,16 @@ Includes one or more *table_references* and is typically followed by the WHERE,
        ( subquery ) [ AS ] alias [ ( column_alias [, ...] ) ]
        table_reference [ ON join_condition ]
 
-   * *with\_subquery\_table_name*  
+   * *with\_subquery\_table_name*  
    A table defined by a subquery in the WITH clause.
 
-  * *table_name*  
+  * *table_name*  
   Name of a table or view. In Drill, you can also refer to a file system directory or a specific file.
 
-   * *alias*  
-   A temporary alternative name for a table or view that provides a convenient shortcut for identifying tables in other parts of a query, such as the WHERE clause. You must supply an alias for a table derived from a subquery. In other table references, aliases are optional. The AS keyword is always optional. Drill does not support the GROUP BY alias.
+   * *alias*  
+   A temporary alternative name for a table or view that provides a convenient shortcut for identifying tables in other parts of a query, such as the WHERE clause. You must supply an alias for a table derived from a subquery. Aliases might be required for [querying nested JSON]({{site.baseurl}}/docs/json-data-model/#analyzing-json). Aliases are also required to resolve ambiguous references, such as using the name "user" to query the Drill profiles: without a table alias, Drill treats "user" as a function and the query returns unexpected results; with a table alias, Drill treats "user" as a column identifier and the query returns the expected results. The AS keyword is always optional. Drill does not support the GROUP BY alias.
 
-   * *column_alias*  
+   * *column_alias*  
    A temporary alternative name for a column in a table or view. You can use named column aliases in the SELECT list to provide meaningful names for regular columns and computed columns, such as the results of aggregate functions. You cannot reference column aliases in the following clauses:  
        WHERE  
        GROUP BY  
@@ -38,24 +38,24 @@ Includes one or more *table_references* and is typically followed by the WHERE,
 
        Because Drill works with schema-less data sources, you cannot use positional aliases (1, 2, etc.) to refer to SELECT list columns, except in the ORDER BY clause.
 
-   * *subquery*  
+   * *subquery*  
    A query expression that evaluates to a table. The table exists only for the duration of the query and is typically given a name or alias, though an alias is not required. You can also define column names for tables that derive from subqueries. Naming column aliases is important when you want to join the results of subqueries to other tables and when you want to select or constrain those columns elsewhere in the query. A subquery may contain an ORDER BY clause, but this clause may have no effect if a LIMIT or OFFSET clause is not also specified. You can use the following subquery operators in Drill queries. These operators all return Boolean results.  
-       * ALL  
-       * ANY  
-       * EXISTS  
-       * IN  
-       * SOME  
-      
+       * ALL  
+       * ANY  
+       * EXISTS  
+       * IN  
+       * SOME  
+      
        In general, correlated subqueries are supported. EXISTS and NOT EXISTS subqueries that do not contain a correlation join are not yet supported.
 
-   * *join_type*  
+   * *join_type*  
    Specifies one of the following join types:  
        [INNER] JOIN  
        LEFT [OUTER] JOIN  
        RIGHT [OUTER] JOIN  
        FULL [OUTER] JOIN
 
-   * *ON join_condition*  
+   * *ON join_condition*  
    A type of join specification where the joining columns are stated as a condition that follows the ON keyword.  
        Example:  ` homes join listing on homes.listid=listing.listid and homes.homeid=listing.homeid`
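A minimal sketch of the alias point in the *alias* entry above, assuming a hypothetical JSON file that contains a `user` field (file and field names are illustrative):

    -- Without the table alias t, Drill treats user as a function rather than a column.
    SELECT t.`user`, t.`event`
    FROM dfs.`/tmp/events.json` t;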
 
@@ -68,8 +68,8 @@ Return all of the rows that the equivalent inner join would return plus non-matc
 
 ## Usage Notes  
    * Joined columns must have comparable data types.
-   * A join with the ON syntax retains both joining columns in its intermediate result set.  
-
+   * A join with the ON syntax retains both joining columns in its intermediate result set.  
+
 ## Example  
 
        0: jdbc:drill:zk=local> SELECT tbl1.id, tbl1.type 

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-commands/089-with-clause.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/089-with-clause.md b/_docs/sql-reference/sql-commands/089-with-clause.md
index 2b5ca89..41c2cc7 100755
--- a/_docs/sql-reference/sql-commands/089-with-clause.md
+++ b/_docs/sql-reference/sql-commands/089-with-clause.md
@@ -12,7 +12,9 @@ name, an optional list of column names, and a SELECT statement.
 The WITH clause supports the following syntax:
 
     WITH with_subquery [, ...]
-    where with_subquery is:
+
+where with_subquery is:
+
     with_subquery_table_name [ ( column_name [, ...] ) ] AS ( query ) 
 
 ## Parameters
@@ -27,7 +29,7 @@ An optional list of output column names for the WITH clause subquery,
 separated by commas. The number of column names specified must be equal to or
 less than the number of columns defined by the subquery.
 
-query  
+*query*  
 Any SELECT query that Drill supports. See
 [SELECT]({{ site.baseurl }}/docs/SELECT+Statements).
 
@@ -41,9 +43,9 @@ reuse the results for query optimization.
 
 You can use a WITH clause in the following SQL statements:
 
-  * SELECT (including subqueries within SELECT statements)  
-  * CREATE TABLE AS
-  * CREATE VIEW
+  * SELECT (including subqueries within SELECT statements)  
+  * CREATE TABLE AS
+  * CREATE VIEW
   * EXPLAIN
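A minimal sketch of a WITH clause in a SELECT statement, reusing the q1_sales view from the window function examples (the column names are taken from those examples):

    WITH top_sellers (emp_name, total_sales) AS
      (SELECT emp_name, SUM(sales) FROM q1_sales GROUP BY emp_name)
    SELECT emp_name, total_sales
    FROM top_sellers
    ORDER BY total_sales DESC;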
 
 You can reference the temporary tables in the FROM clause of the query. If the

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md b/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
index 514651c..b1ad6b0 100644
--- a/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
+++ b/_docs/sql-reference/sql-functions/030-date-time-functions-and-arithmetic.md
@@ -46,7 +46,7 @@ Find the interval between midnight today, April 3, 2015, and June 13, 1957.
     +------------+
     1 row selected (0.064 seconds)
 
-Find the interval between midnight today, May 21, 2015, and hire dates of employees 578 and 761 in the `employees.json` file included with the Drill installation.
+Find the interval between midnight today, May 21, 2015, and the hire dates of employees 578 and 761 in the `employee.json` file. The file is installed with Drill and located in the Drill classpath.
 
     SELECT AGE(CAST(hire_date AS TIMESTAMP)) FROM cp.`employee.json` where employee_id IN( '578','761');
     +------------------+

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-window-functions/010-sql-window-functions-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-window-functions/010-sql-window-functions-introduction.md b/_docs/sql-reference/sql-window-functions/010-sql-window-functions-introduction.md
index f8f372f..1baca3f 100644
--- a/_docs/sql-reference/sql-window-functions/010-sql-window-functions-introduction.md
+++ b/_docs/sql-reference/sql-window-functions/010-sql-window-functions-introduction.md
@@ -52,7 +52,7 @@ Window functions are applied to the rows within each partition and sorted accord
 
 The following query uses the AVG() window function with the PARTITION BY clause to determine the average car sales for each dealer in Q1:  
 
-       select emp_name, dealer_id, sales, avg(sales) over (PARTITION BY dealer_id) as avgsales from q1_sales;
+       select emp_name, dealer_id, sales, avg(sales) over (partition by dealer_id) as avgsales from q1_sales;
        +-----------------+------------+--------+-----------+
        |    emp_name     | dealer_id  | sales  | avgsales  |
        +-----------------+------------+--------+-----------+
@@ -74,7 +74,7 @@ The following query uses the AVG() window function with the PARTITION BY clause
 
 Currently, Drill supports the following aggregate and ranking window functions:  
 
-Aggregation  
+Aggregate   
 
 * AVG()
 * COUNT()
@@ -90,16 +90,15 @@ Ranking
 * RANK()
 * ROW_NUMBER()
 
-All of the ranking functions listed depend on the sort ordering specified by the ORDER BY clause of the associated window definition. Rows that are not distinct in the ordering are called peers. The ranking functions are defined so that they give the same answer for any two peer rows.  
+All of the ranking functions depend on the sort ordering specified by the ORDER BY clause of the associated window definition. Rows that are not distinct in the ordering are called peers. The ranking functions are defined so that they give the same answer for any two peer rows.  
 
 ## Syntax  
 
-       window_function (expression) 
-       OVER (
+       window_function (expression) OVER (
        [ PARTITION BY expr_list ]
        [ ORDER BY order_list ][ frame_clause ] )  
 
-where function is one of the functions described, such as AVG() and *expr_list* is:  
+where *window_function* is one of the functions described, such as AVG(), and *expr_list* is:  
 
        expression | column_name [, expr_list ]
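A minimal sketch that fills in this syntax, reusing the q1_sales view from the examples section (column names are taken from those examples):

    SELECT emp_name, dealer_id, sales,
           RANK() OVER (PARTITION BY dealer_id ORDER BY sales DESC) AS sales_rank
    FROM q1_sales;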
 
@@ -138,13 +137,13 @@ PARTITION BY *expr_list*
 PARTITION BY is an optional clause that subdivides the data into partitions. Including the partition clause divides the query result set into partitions, and the window function is applied to each partition separately. Computation restarts for each partition. If you do not include a partition clause, the function calculates on the entire table or file.  
 
 ORDER BY *order_list*  
-The ORDER BY clause defines the logical order of the rows within each partition of the result set. If no PARTITION BY is specified, ORDER BY uses the entire table. ORDER BY is optional for the aggregation window functions, but required for the ranking functions. This ORDER BY clause does not relate to the ORDER BY clause that you use outside of the OVER clause.  
+The ORDER BY clause defines the logical order of the rows within each partition of the result set. If no PARTITION BY is specified, ORDER BY uses the entire table. ORDER BY is optional for the aggregate window functions and required for the ranking functions. This ORDER BY clause does not relate to the ORDER BY clause used outside of the OVER clause.  
 
 The window function is applied to the rows within each partition sorted according to the order specification.  
 
 Column identifiers or expressions that evaluate to column identifiers are required in the order list. You can also use constants as substitutes for column names.  
 
-NULLS are treated as their own group, sorted and ranked last in ASC, and sorted and ranked first in DESC. ASC is the default sort order.  
+NULL values are treated as their own group; they are sorted and ranked last in ASC order and first in DESC order. ASC is the default sort order.  
 
 *column_name*  
 The name of a column to be partitioned by or ordered by.  
@@ -160,7 +159,7 @@ When the OVER clause contains an ORDER BY clause, the following frames are equiv
        RANGE UNBOUNDED PRECEDING
        RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW  
 
-When OVER clause does not contain an ORDER BY clause, the following frames are equivalent to the default frame:  
+When the OVER clause does not contain an ORDER BY clause, the following frames are equivalent to the default frame:  
 
        RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
        ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING  

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-window-functions/020-aggregate-window-functions.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-window-functions/020-aggregate-window-functions.md b/_docs/sql-reference/sql-window-functions/020-aggregate-window-functions.md
index 7b3922c..de692c6 100644
--- a/_docs/sql-reference/sql-window-functions/020-aggregate-window-functions.md
+++ b/_docs/sql-reference/sql-window-functions/020-aggregate-window-functions.md
@@ -3,11 +3,9 @@ title: "Aggregate Window Functions"
 parent: "SQL Window Functions"
 ---
 
-## Aggregate Window Functions
+Window functions operate on a set of rows and return a single value for each row from the underlying query. The OVER() clause differentiates window functions from other analytical and reporting functions. See [SQL Window Functions Introduction]({{site.baseurl}}/docs/sql-window-functions-introduction/). You can use certain aggregate functions as window functions in Drill. 
 
-Window functions operate on a set of rows and return a single value for each row from the underlying query. See SQL Window Functions. You can use certain aggregate functions as window functions in Drill. The OVER() clause differentiates window functions from other analytical and reporting functions. 
-
-The following table lists the aggregate functions that you can use as window functions in Drill with supported data types and descriptions of each function:  
+The following table lists the aggregate window functions with supported data types and descriptions:  
 
 
 | Window Function | Argument Type                                                                   | Return Type                                                                                                                                               | Description                                                                                                                                                                                                                                                 |
@@ -19,7 +17,7 @@ The following table lists the aggregate functions that you can use as window fun
 | SUM()           | SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DECIMAL, INTERVALDAY, or INTERVALYEAR | BIGINT for SMALLINT or INTEGER arguments, DECIMAL for BIGINT arguments, DOUBLE for floating-point arguments, otherwise the same as the argument data type | The SUM () window function returns the sum of the expression across all input values. The SUM function works with numeric values and ignores NULL values.                                                                                                   |
 
 ## Syntax  
-       window_function ( [ ALL ] <expression> ) 
+       window_function ( [ ALL ] expression ) 
        OVER ( [ PARTITION BY expr_list ] [ ORDER BY order_list frame_clause ] )
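A minimal sketch of this syntax with a PARTITION BY clause and no ORDER BY, so the function is computed over the whole partition; it reuses the q1_sales view from the examples page:

    SELECT emp_name, dealer_id, sales,
           MAX(sales) OVER (PARTITION BY dealer_id) AS max_sale
    FROM q1_sales;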
 
 
@@ -51,7 +49,7 @@ If an ORDER BY clause is used for an aggregate function, an explicit frame claus
 
 
 ## Examples  
-The following examples show queries that use each of the aggregate window functions in Drill. See Window Function Examples for information about the data and setup for these examples.
+The following examples show queries that use each of the aggregate window functions in Drill. See [SQL Window Functions Examples]({{site.baseurl}}/docs/sql-window-functions-examples/) for information about the data and setup for these examples.
  
 
 ### AVG()  
@@ -159,7 +157,7 @@ The following query uses the MIN() window function with the PARTITION BY clause
 ### SUM()  
 The following query uses the SUM() window function to total the amount of sales for each dealer in Q1. The word sum is a reserved keyword in Drill and must be enclosed in back ticks (``).  
 
-       select dealer_id, emp_name, sales, sum(sales) over(partition by dealer_id) as `sum` from q2_sales;
+       select dealer_id, emp_name, sales, sum(sales) over(partition by dealer_id) as `sum` from q1_sales;
        +------------+-----------------+--------+--------+
        | dealer_id  |    emp_name     | sales  |  sum   |
        +------------+-----------------+--------+--------+

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-window-functions/030-ranking-window-functions.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-window-functions/030-ranking-window-functions.md b/_docs/sql-reference/sql-window-functions/030-ranking-window-functions.md
index fa9ebf9..90753df 100644
--- a/_docs/sql-reference/sql-window-functions/030-ranking-window-functions.md
+++ b/_docs/sql-reference/sql-window-functions/030-ranking-window-functions.md
@@ -3,11 +3,9 @@ title: "Ranking Window Functions"
 parent: "SQL Window Functions"
 ---
 
-## Ranking Window Functions
+Window functions operate on a set of rows and return a single value for each row from the underlying query. The OVER() clause differentiates window functions from other analytical and reporting functions. See [SQL Window Functions Introduction]({{site.baseurl}}/docs/sql-window-functions-introduction/). You can use ranking functions in Drill to return a ranking value for each row in a partition.  
 
-Window functions operate on a set of rows and return a single value for each row from the underlying query. See SQL Window Functions. The OVER() clause differentiates window functions from other analytical and reporting functions. 
-
-The following table lists the ranking window functions in Drill with return data types and descriptions of each function:  
+The following table lists the ranking window functions with supported data types and descriptions:  
 
 | Window Function | Return Type      | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           |
 |-----------------|------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@@ -33,12 +31,12 @@ CUME_DIST(), DENSE_RANK(), PERCENT_RANK(), RANK(), ROW_NUMBER()
 The functions do not take arguments, however the empty parentheses are required.  
 
 OVER clause  
-The window clauses for the function. The OVER clause cannot contain an explicit frame specification, but must include an ORDER BY clause. See Window Function Syntax for OVER clause syntax.
+The window clause for the function. The OVER clause cannot contain an explicit frame specification, but must include an ORDER BY clause. See [Window Function Syntax]({{site.baseurl}}/docs/sql-window-functions-introduction/#syntax) for OVER clause syntax.
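A minimal sketch of these requirements (empty parentheses and an ORDER BY in the OVER clause), reusing the q1_sales view from the examples page:

    SELECT emp_name, dealer_id, sales,
           ROW_NUMBER() OVER (PARTITION BY dealer_id ORDER BY sales) AS row_num
    FROM q1_sales;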
 
 
 
 ## Examples  
-The following examples show queries that use each of the ranking window functions in Drill. See Window Function Examples for information about the data and setup for these examples.
+The following examples show queries that use each of the ranking window functions in Drill. See [SQL Window Functions Examples]({{site.baseurl}}/docs/sql-window-functions-examples/) for information about the data and setup for these examples.
  
 
 ### CUME_DIST()  

http://git-wip-us.apache.org/repos/asf/drill/blob/97a211ee/_docs/sql-reference/sql-window-functions/040-sql-window-functions-examples.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-window-functions/040-sql-window-functions-examples.md b/_docs/sql-reference/sql-window-functions/040-sql-window-functions-examples.md
index a9c1a28..580b107 100644
--- a/_docs/sql-reference/sql-window-functions/040-sql-window-functions-examples.md
+++ b/_docs/sql-reference/sql-window-functions/040-sql-window-functions-examples.md
@@ -3,7 +3,7 @@ title: "SQL Window Functions Examples"
 parent: "SQL Window Functions"
 ---
 
-The window function examples use a view named q1\_sales that was created from a CSV file named emp_sales and stored in a directory on the local file system:
+The window function examples use a view named q1\_sales that was created from a CSV file named emp_sales.csv, which is stored in a directory on the local file system.
  
 The emp_sales.csv file contains the following information:  
 
@@ -51,7 +51,7 @@ You can then run the USE command to change to the schema with the file. All quer
        | true  | Default schema changed to [dfs.emp]  |
        +-------+--------------------------------------+
  
-To create the q1_sales view used in the examples, issue the following query with the CREATE VIEW command to Drill.
+To create the q1_sales view used in the examples, issue the following query with the CREATE VIEW command.
 
 Note: You must use column numbers when querying CSV files. Also, CAST the columns to a specific data type to avoid incorrect implicit casting by Drill. This can affect the accuracy of window function results. In Drill, the column array starts with 0 as the first column.
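A minimal sketch of the kind of CREATE VIEW statement the note describes; the column positions, names, and types here are assumptions, not the exact view definition used on this page:

    -- Assumes USE has already switched to the workspace that contains emp_sales.csv.
    CREATE OR REPLACE VIEW q1_sales AS
    SELECT CAST(columns[0] AS VARCHAR(30)) AS emp_name,
           CAST(columns[1] AS INT)         AS dealer_id,
           CAST(columns[2] AS INT)         AS sales
    FROM `emp_sales.csv`;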
  


[2/2] drill git commit: get rid of 1.1.0 rn

Posted by br...@apache.org.
get rid of 1.1.0 rn


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/4be94a15
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/4be94a15
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/4be94a15

Branch: refs/heads/gh-pages
Commit: 4be94a15d2dcbc9996d57b5e01fabea45c6dfad1
Parents: 97a211e
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Wed Jul 1 16:41:55 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Wed Jul 1 16:41:55 2015 -0700

----------------------------------------------------------------------
 _data/docs.json | 85 +++++++++++++++++++++-------------------------------
 1 file changed, 34 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/4be94a15/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 8883f4c..997d36d 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -324,8 +324,8 @@
             "next_title": "Apache Drill 1.0.0 Release Notes", 
             "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
             "parent": "Release Notes", 
-            "previous_title": "Apache Drill 0.8.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
+            "previous_title": "Apache Drill 0.9.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
             "relative_path": "_docs/rn/070-0.9.0-rn.md", 
             "title": "Apache Drill 0.9.0 Release Notes", 
             "url": "/docs/apache-drill-0-9-0-release-notes/"
@@ -338,8 +338,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Apache Drill 1.1.0 Release Notes", 
-            "next_url": "/docs/apache-drill-1-1-0-release-notes/", 
+            "next_title": "Sample Datasets", 
+            "next_url": "/docs/sample-datasets/", 
             "parent": "Release Notes", 
             "previous_title": "Apache Drill 0.9.0 Release Notes", 
             "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
@@ -347,23 +347,6 @@
             "title": "Apache Drill 1.0.0 Release Notes", 
             "url": "/docs/apache-drill-1-0-0-release-notes/"
         }, 
-        "Apache Drill 1.1.0 Release Notes": {
-            "breadcrumbs": [
-                {
-                    "title": "Release Notes", 
-                    "url": "/docs/release-notes/"
-                }
-            ], 
-            "children": [], 
-            "next_title": "Sample Datasets", 
-            "next_url": "/docs/sample-datasets/", 
-            "parent": "Release Notes", 
-            "previous_title": "Apache Drill 1.0.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
-            "relative_path": "_docs/rn/090-1.1.0-rn.md", 
-            "title": "Apache Drill 1.1.0 Release Notes", 
-            "url": "/docs/apache-drill-1-1-0-release-notes/"
-        }, 
         "Apache Drill Contribution Guidelines": {
             "breadcrumbs": [
                 {
@@ -7619,12 +7602,12 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Apache Drill 1.0.0 Release Notes", 
-                    "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
+                    "next_title": "Apache Drill 0.9.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
+                    "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
                     "title": "Apache Drill 0.9.0 Release Notes", 
                     "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }, 
@@ -7636,14 +7619,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Apache Drill 1.1.0 Release Notes", 
-                    "next_url": "/docs/apache-drill-1-1-0-release-notes/", 
+                    "next_title": "Apache Drill 1.0.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.9.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
-                    "relative_path": "_docs/rn/080-1.0.0-rn.md", 
-                    "title": "Apache Drill 1.0.0 Release Notes", 
-                    "url": "/docs/apache-drill-1-0-0-release-notes/"
+                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
+                    "title": "Apache Drill 0.9.0 Release Notes", 
+                    "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -7656,11 +7639,11 @@
                     "next_title": "Sample Datasets", 
                     "next_url": "/docs/sample-datasets/", 
                     "parent": "Release Notes", 
-                    "previous_title": "Apache Drill 1.0.0 Release Notes", 
-                    "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
-                    "relative_path": "_docs/rn/090-1.1.0-rn.md", 
-                    "title": "Apache Drill 1.1.0 Release Notes", 
-                    "url": "/docs/apache-drill-1-1-0-release-notes/"
+                    "previous_title": "Apache Drill 0.9.0 Release Notes", 
+                    "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
+                    "relative_path": "_docs/rn/080-1.0.0-rn.md", 
+                    "title": "Apache Drill 1.0.0 Release Notes", 
+                    "url": "/docs/apache-drill-1-0-0-release-notes/"
                 }
             ], 
             "next_title": "Apache Drill 0.5.0 Release Notes", 
@@ -9941,8 +9924,8 @@
             "next_title": "AOL Search", 
             "next_url": "/docs/aol-search/", 
             "parent": "", 
-            "previous_title": "Apache Drill 1.1.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-1-1-0-release-notes/", 
+            "previous_title": "Apache Drill 1.0.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
             "relative_path": "_docs/140-sample-datasets.md", 
             "title": "Sample Datasets", 
             "url": "/docs/sample-datasets/"
@@ -15646,12 +15629,12 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Apache Drill 1.0.0 Release Notes", 
-                    "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
+                    "next_title": "Apache Drill 0.9.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
+                    "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
                     "title": "Apache Drill 0.9.0 Release Notes", 
                     "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }, 
@@ -15663,14 +15646,14 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Apache Drill 1.1.0 Release Notes", 
-                    "next_url": "/docs/apache-drill-1-1-0-release-notes/", 
+                    "next_title": "Apache Drill 1.0.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.9.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
-                    "relative_path": "_docs/rn/080-1.0.0-rn.md", 
-                    "title": "Apache Drill 1.0.0 Release Notes", 
-                    "url": "/docs/apache-drill-1-0-0-release-notes/"
+                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
+                    "title": "Apache Drill 0.9.0 Release Notes", 
+                    "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }, 
                 {
                     "breadcrumbs": [
@@ -15683,11 +15666,11 @@
                     "next_title": "Sample Datasets", 
                     "next_url": "/docs/sample-datasets/", 
                     "parent": "Release Notes", 
-                    "previous_title": "Apache Drill 1.0.0 Release Notes", 
-                    "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
-                    "relative_path": "_docs/rn/090-1.1.0-rn.md", 
-                    "title": "Apache Drill 1.1.0 Release Notes", 
-                    "url": "/docs/apache-drill-1-1-0-release-notes/"
+                    "previous_title": "Apache Drill 0.9.0 Release Notes", 
+                    "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
+                    "relative_path": "_docs/rn/080-1.0.0-rn.md", 
+                    "title": "Apache Drill 1.0.0 Release Notes", 
+                    "url": "/docs/apache-drill-1-0-0-release-notes/"
                 }
             ], 
             "next_title": "Apache Drill 0.5.0 Release Notes", 
@@ -15757,8 +15740,8 @@
             "next_title": "AOL Search", 
             "next_url": "/docs/aol-search/", 
             "parent": "", 
-            "previous_title": "Apache Drill 1.1.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-1-1-0-release-notes/", 
+            "previous_title": "Apache Drill 1.0.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
             "relative_path": "_docs/140-sample-datasets.md", 
             "title": "Sample Datasets", 
             "url": "/docs/sample-datasets/"