Posted to commits@drill.apache.org by br...@apache.org on 2015/09/09 01:44:09 UTC

[1/4] drill git commit: resolve merge conflict

Repository: drill
Updated Branches:
  refs/heads/gh-pages 6513d421a -> be2f22234


resolve merge conflict


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/de783c42
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/de783c42
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/de783c42

Branch: refs/heads/gh-pages
Commit: de783c421eae2d0e37bd48ea792497c12f677239
Parents: 6513d42
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Fri Sep 4 18:19:56 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Fri Sep 4 18:19:56 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 110 ++++++++++++++++---
 .../030-deploying-and-using-a-hive-udf.md       |   2 +-
 .../020-developing-a-simple-function.md         |   5 +-
 .../030-developing-an-aggregate-function.md     |   5 +-
 .../025-configuring-odbc-on-mac-os-x.md         |   2 +
 .../020-installing-the-driver-on-mac-os-x.md    |   6 +-
 .../080-configuring-jreport.md                  |   1 -
 .../090-configuring-jreport-with-drill.md       |  60 ++++++++++
 _docs/query-data/010-query-data-introduction.md |   4 +-
 _docs/query-data/030-querying-hbase.md          |  92 ++++++++++++++--
 _docs/query-data/050-querying-hive.md           |   2 +-
 _docs/rn/020-1.0.0-rn.md                        |  14 ++-
 .../data-types/010-supported-data-types.md      |  31 +++---
 .../030-handling-different-data-types.md        |   2 +-
 .../sql-commands/035-partition-by-clause.md     |   6 +-
 .../sql-functions/020-data-type-conversion.md   |  29 ++---
 .../sql-functions/040-string-manipulation.md    |   2 +-
 17 files changed, 305 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 4133374..85e50a2 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -1162,6 +1162,27 @@
             "title": "Configuring Drill Memory", 
             "url": "/docs/configuring-drill-memory/"
         }, 
+        "Configuring JReport with Drill": {
+            "breadcrumbs": [
+                {
+                    "title": "Using Drill with BI Tools", 
+                    "url": "/docs/using-drill-with-bi-tools/"
+                }, 
+                {
+                    "title": "ODBC/JDBC Interfaces", 
+                    "url": "/docs/odbc-jdbc-interfaces/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Query Data", 
+            "next_url": "/docs/query-data/", 
+            "parent": "Using Drill with BI Tools", 
+            "previous_title": "Using Apache Drill with Tableau 9 Server", 
+            "previous_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+            "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md", 
+            "title": "Configuring JReport with Drill", 
+            "url": "/docs/configuring-jreport-with-drill/"
+        }, 
         "Configuring Multitenant Resources": {
             "breadcrumbs": [
                 {
@@ -5504,14 +5525,35 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Query Data", 
-                            "next_url": "/docs/query-data/", 
+                            "next_title": "Configuring JReport with Drill", 
+                            "next_url": "/docs/configuring-jreport-with-drill/", 
                             "parent": "Using Drill with BI Tools", 
                             "previous_title": "Using Apache Drill with Tableau 9 Desktop", 
                             "previous_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
                             "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/070-using-apache-drill-with-tableau-9-server.md", 
                             "title": "Using Apache Drill with Tableau 9 Server", 
                             "url": "/docs/using-apache-drill-with-tableau-9-server/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Using Drill with BI Tools", 
+                                    "url": "/docs/using-drill-with-bi-tools/"
+                                }, 
+                                {
+                                    "title": "ODBC/JDBC Interfaces", 
+                                    "url": "/docs/odbc-jdbc-interfaces/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Query Data", 
+                            "next_url": "/docs/query-data/", 
+                            "parent": "Using Drill with BI Tools", 
+                            "previous_title": "Using Apache Drill with Tableau 9 Server", 
+                            "previous_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                            "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md", 
+                            "title": "Configuring JReport with Drill", 
+                            "url": "/docs/configuring-jreport-with-drill/"
                         }
                     ], 
                     "next_title": "Using Drill with BI Tools Introduction", 
@@ -6626,8 +6668,8 @@
             "next_title": "Query Data Introduction", 
             "next_url": "/docs/query-data-introduction/", 
             "parent": "", 
-            "previous_title": "Using Apache Drill with Tableau 9 Server", 
-            "previous_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+            "previous_title": "Configuring JReport with Drill", 
+            "previous_url": "/docs/configuring-jreport-with-drill/", 
             "relative_path": "_docs/070-query-data.md", 
             "title": "Query Data", 
             "url": "/docs/query-data/"
@@ -10816,8 +10858,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Query Data", 
-            "next_url": "/docs/query-data/", 
+            "next_title": "Configuring JReport with Drill", 
+            "next_url": "/docs/configuring-jreport-with-drill/", 
             "parent": "Using Drill with BI Tools", 
             "previous_title": "Using Apache Drill with Tableau 9 Desktop", 
             "previous_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
@@ -11069,14 +11111,35 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Query Data", 
-                    "next_url": "/docs/query-data/", 
+                    "next_title": "Configuring JReport with Drill", 
+                    "next_url": "/docs/configuring-jreport-with-drill/", 
                     "parent": "Using Drill with BI Tools", 
                     "previous_title": "Using Apache Drill with Tableau 9 Desktop", 
                     "previous_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
                     "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/070-using-apache-drill-with-tableau-9-server.md", 
                     "title": "Using Apache Drill with Tableau 9 Server", 
                     "url": "/docs/using-apache-drill-with-tableau-9-server/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Using Drill with BI Tools", 
+                            "url": "/docs/using-drill-with-bi-tools/"
+                        }, 
+                        {
+                            "title": "ODBC/JDBC Interfaces", 
+                            "url": "/docs/odbc-jdbc-interfaces/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Query Data", 
+                    "next_url": "/docs/query-data/", 
+                    "parent": "Using Drill with BI Tools", 
+                    "previous_title": "Using Apache Drill with Tableau 9 Server", 
+                    "previous_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                    "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md", 
+                    "title": "Configuring JReport with Drill", 
+                    "url": "/docs/configuring-jreport-with-drill/"
                 }
             ], 
             "next_title": "Using Drill with BI Tools Introduction", 
@@ -13014,14 +13077,35 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Query Data", 
-                            "next_url": "/docs/query-data/", 
+                            "next_title": "Configuring JReport with Drill", 
+                            "next_url": "/docs/configuring-jreport-with-drill/", 
                             "parent": "Using Drill with BI Tools", 
                             "previous_title": "Using Apache Drill with Tableau 9 Desktop", 
                             "previous_url": "/docs/using-apache-drill-with-tableau-9-desktop/", 
                             "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/070-using-apache-drill-with-tableau-9-server.md", 
                             "title": "Using Apache Drill with Tableau 9 Server", 
                             "url": "/docs/using-apache-drill-with-tableau-9-server/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Using Drill with BI Tools", 
+                                    "url": "/docs/using-drill-with-bi-tools/"
+                                }, 
+                                {
+                                    "title": "ODBC/JDBC Interfaces", 
+                                    "url": "/docs/odbc-jdbc-interfaces/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Query Data", 
+                            "next_url": "/docs/query-data/", 
+                            "parent": "Using Drill with BI Tools", 
+                            "previous_title": "Using Apache Drill with Tableau 9 Server", 
+                            "previous_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+                            "relative_path": "_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md", 
+                            "title": "Configuring JReport with Drill", 
+                            "url": "/docs/configuring-jreport-with-drill/"
                         }
                     ], 
                     "next_title": "Using Drill with BI Tools Introduction", 
@@ -13419,8 +13503,8 @@
             "next_title": "Query Data Introduction", 
             "next_url": "/docs/query-data-introduction/", 
             "parent": "", 
-            "previous_title": "Using Apache Drill with Tableau 9 Server", 
-            "previous_url": "/docs/using-apache-drill-with-tableau-9-server/", 
+            "previous_title": "Configuring JReport with Drill", 
+            "previous_url": "/docs/configuring-jreport-with-drill/", 
             "relative_path": "_docs/070-query-data.md", 
             "title": "Query Data", 
             "url": "/docs/query-data/"
@@ -15871,4 +15955,4 @@
             "url": "/docs/project-bylaws/"
         }
     ]
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
----------------------------------------------------------------------
diff --git a/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md b/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
index 53100ac..e1d7706 100644
--- a/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
+++ b/_docs/data-sources-and-file-formats/030-deploying-and-using-a-hive-udf.md
@@ -48,7 +48,7 @@ To set up the UDF:
    `<drill installation directory>/bin/drillbit.sh restart`
  
 ## Using a UDF
-Use a Hive UDF just as you would use a Drill custom function. For example, to query using a Hive UDF named upper-to-lower that takes a column.value argument, the SELECT statement looks something like this:  
+Use a Hive UDF just as you would use a Drill custom function. For example, to query using a Hive UDF named MY_UPPER, the SELECT statement looks something like this:  
      
     SELECT MY_UPPER('abc') from (VALUES(1));
     +---------+
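
A minimal sketch of calling the same UDF against file data rather than a literal; the CSV path and the `columns` index are assumptions, not part of the documentation:

```
-- Hypothetical usage of the registered Hive UDF MY_UPPER on the second field
-- of a CSV file read through the dfs storage plugin.
SELECT MY_UPPER(columns[1]) AS last_name_upper
FROM dfs.`/tmp/customers.csv`
LIMIT 5;
```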

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/develop-custom-functions/020-developing-a-simple-function.md
----------------------------------------------------------------------
diff --git a/_docs/develop-custom-functions/020-developing-a-simple-function.md b/_docs/develop-custom-functions/020-developing-a-simple-function.md
index e807e89..83fc3ad 100644
--- a/_docs/develop-custom-functions/020-developing-a-simple-function.md
+++ b/_docs/develop-custom-functions/020-developing-a-simple-function.md
@@ -6,7 +6,10 @@ The section presents a brief overview of developing a simple function. The [tuto
 
 To develop a simple function, you need to create a class within a Java package that implements Drill’s simple interface
 into the program, and include the required information for the function type.
-Your function must use [data types]({{ site.baseurl }}/docs/supported-data-types/) that Drill supports. 
+Your function must use [data types]({{ site.baseurl }}/docs/supported-data-types/) that Drill supports. Keep the following requirements in mind:
+
+* Annotate all data that a UDF uses.
+* Fully qualify class references.
 
 Complete the following steps to develop a simple function using Drill’s simple
 function interface:

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
----------------------------------------------------------------------
diff --git a/_docs/develop-custom-functions/030-developing-an-aggregate-function.md b/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
index fbbec2a..c82a202 100644
--- a/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
+++ b/_docs/develop-custom-functions/030-developing-an-aggregate-function.md
@@ -5,7 +5,10 @@ parent: "Develop Custom Functions"
 The API for developing aggregate custom functions is at the alpha stage and intended for experimental use only. To experiment with this API, create a class within a Java package that implements Drill’s aggregate
 interface into the program. Include the required information for the function.
 Your function must include data types that Drill supports, such as INTEGER or
-BIGINT. For a list of supported data types, refer to the [SQL Reference]({{ site.baseurl }}/docs/supported-data-types/).
+BIGINT. For a list of supported data types, refer to the [SQL Reference]({{ site.baseurl }}/docs/supported-data-types/). Keep the following guidelines in mind:
+
+* Do not use complex @Workspace variables. 
+* You cannot allocate a Repeated* value or have a ComplexWriter in the @Workspace.
 
 Complete the following steps to create an aggregate function:
 

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/odbc-jdbc-interfaces/configuring-odbc/025-configuring-odbc-on-mac-os-x.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/configuring-odbc/025-configuring-odbc-on-mac-os-x.md b/_docs/odbc-jdbc-interfaces/configuring-odbc/025-configuring-odbc-on-mac-os-x.md
index 926410f..6c00db3 100644
--- a/_docs/odbc-jdbc-interfaces/configuring-odbc/025-configuring-odbc-on-mac-os-x.md
+++ b/_docs/odbc-jdbc-interfaces/configuring-odbc/025-configuring-odbc-on-mac-os-x.md
@@ -33,6 +33,8 @@ The installer for Mac OS X creates a sample user DSN in odbc.ini in either of th
 
 Depending on the driver manager you use, the user DSN in one of these files will be effective.
 
+{% include startnote.html %}The System and User DSN use different ini files in different locations on OS X.{% include endnote.html %}
+
 ----------
 
 ## Step 1: Set Environment Variables 

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/odbc-jdbc-interfaces/installing-the-odbc-driver/020-installing-the-driver-on-mac-os-x.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/installing-the-odbc-driver/020-installing-the-driver-on-mac-os-x.md b/_docs/odbc-jdbc-interfaces/installing-the-odbc-driver/020-installing-the-driver-on-mac-os-x.md
index 725b33c..4a7bd3b 100644
--- a/_docs/odbc-jdbc-interfaces/installing-the-odbc-driver/020-installing-the-driver-on-mac-os-x.md
+++ b/_docs/odbc-jdbc-interfaces/installing-the-odbc-driver/020-installing-the-driver-on-mac-os-x.md
@@ -7,9 +7,9 @@ the Drill service.
 
 Install the MapR Drill ODBC Driver on a system that meets the [system requirements]({{site.baseurl}}/docs/installing-the-driver-on-mac-os-x/#system-requirements). Complete the following steps, described in detail in this document:
 
-  * [Step 1: Download the MapR Drill ODBC Driver]({{site.baseurl}}/docs/install-the-driver-on-mac-os-x/#step-1:-download-the-mapr-drill-odbc-driver) 
-  * [Step 2: Install the MapR Drill ODBC Driver]({{site.baseurl}}/docs/installing-the-driver-on-mac-os-x/#step-2:-install-the-mapr-drill-odbc-driver) 
-  * [Step 3: Check the MapR Drill ODBC Driver Version]({{site.baseurl}}docs/installing-the-driver-on-mac-os-x/#step-3:-check-the-mapr-drill-odbc-driver-version)
+  * [Step 1: Download the MapR Drill ODBC Driver]({{site.baseurl}}/docs/installing-the-driver-on-mac-os-x/#step-1-download-the-mapr-drill-odbc-driver)  
+  * [Step 2: Install the MapR Drill ODBC Driver]({{site.baseurl}}/docs/installing-the-driver-on-mac-os-x/#step-2-install-the-mapr-drill-odbc-driver) 
+  * [Step 3: Check the MapR Drill ODBC Driver Version]({{site.baseurl}}/docs/installing-the-driver-on-mac-os-x/#step-3-check-the-mapr-drill-odbc-driver-version)
 
 ## System Requirements
 

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/080-configuring-jreport.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/080-configuring-jreport.md b/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/080-configuring-jreport.md
deleted file mode 100644
index 35f147a..0000000
--- a/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/080-configuring-jreport.md
+++ /dev/null
@@ -1 +0,0 @@
----
title: "Configuring JReport with Drill"
parent: "Using Drill with BI Tools"
---

JReport is an embeddable BI solution that empowers users to analyze data and create reports and dashboards. JReport accesses data from Hadoop systems through Apache Drill. By visualizing data through Drill, users can perform their own reporting and data discovery for agile, on-the-fly decision-making.

You can use JReport 13.1 and the Apache Drill JDBC Driver to easily extract data and visualize it, creating reports and dashboards that you can embed into your own applications. Complete the following simple steps to use Apache Drill with JReport:

1. Install the Drill JDBC Driver with JReport.
2. Create a new JReport Catalog to manage the Drill connection.
3. Use JReport Designer to query the data and create a report.

----------

### Step 1: Install the Drill JDBC Driver with JReport

Drill provides standard JDBC connectivity to integrate with JReport. JReport 13.1 requires Drill 1.0 or later.
For general instructions on installing the Drill JDBC driver, see [Using JDBC]({{ site.baseurl }}/docs/using-the-jdbc-driver/).

1. Locate the JDBC driver in the Drill installation directory on any node where Drill is installed on the cluster: 
        <drill-home>/jars/jdbc-driver/drill-jdbc-all-<drill-version>.jar 
   
2. Copy the Drill JDBC driver into the JReport `lib` folder:
        %REPORTHOME%\lib\
   For example, on Windows, copy the Drill JDBC driver jar file into:
   
        C:\JReport\Designer\lib\drill-jdbc-all-1.0.0.jar
    
3.	Add the location of the JAR file to the JReport CLASSPATH variable. On Windows, edit the `C:\JReport\Designer\bin\setenv.bat` file:
    ![drill query flow]({{ site.baseurl }}/docs/img/jreport_setenv.png)

4. Verify that the JReport system can resolve the hostnames of the ZooKeeper nodes of the Drill cluster. You can do this by configuring DNS for all of the systems. Alternatively, you can edit the hosts file on the JReport system to include the hostnames and IP addresses of all the ZooKeeper nodes used with the Drill cluster.  For Linux systems, the hosts file is located at `/etc/hosts`. For Windows systems, the hosts file is located at `%WINDIR%\system32\drivers\etc\hosts`  Here is an example of a Windows hosts file: ![drill query flow]({{ site.baseurl }}/docs/img/jreport-hostsfile.png)

----------

### Step 2: Create a New JReport Catalog to Manage the Drill Connection

1.	Click Create **New -> Catalog…**
2.	Provide a catalog file name and click **…** to choose the file-saving location.
3.	Click **View -> Catalog Browser**.
4.	Right-click **Data Source 1** and select **Add JDBC Connection**.
5.	Fill in the **Driver**, **URL**, **User**, and **Password** fields. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-catalogbrowser.png)
6.	Click **Options** and select the **Qualifier** tab. 
7. In the **Quote Qualifier** section, choose **User Defined** and change the quote character from “ to ` (backtick). ![drill query flow]({{ site.baseurl }}/docs/img/jreport-quotequalifier.png)
8.	Click **OK**. JReport will verify the connection and save all information.
9.	Add tables and views to the JReport catalog by right-clicking the connection node and choosing **Add Table**. Now you can browse the schemas and add specific tables that you want to make available for building queries. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-addtable.png)
10.	Click **Done** when you have added all the tables you need. 


### Step 3: Use JReport Designer

1.	In the Catalog Browser, right-click **Queries** and select **Add Query…**
2.	Define a JReport query by using the Query Editor. You can also import your own SQL statements. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-queryeditor.png)
3.	Click **OK** to close the Query Editor, and click the **Save Catalog** button to save your progress to the catalog file. 
    **Note**: If the report returns errors, you may need to edit the query and add the schema in front of the table name: `select column from schema.table_name` You can do this by clicking the **SQL** button on the Query Editor.

4.  Use JReport Designer to query the data and create a report. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-crosstab.png)
    ![drill query flow]({{ site.baseurl }}/docs/img/jreport-crosstab2.png)
    ![drill query flow]({{ site.baseurl }}/docs/img/jreport-crosstab3.png)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md b/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md
new file mode 100644
index 0000000..8895e1c
--- /dev/null
+++ b/_docs/odbc-jdbc-interfaces/using-drill-with-bi-tools/090-configuring-jreport-with-drill.md
@@ -0,0 +1,60 @@
+---
+title: "Configuring JReport with Drill"
+parent: "Using Drill with BI Tools"
+---
+
+JReport is an embeddable BI solution that empowers users to analyze data and create reports and dashboards. JReport accesses data from Hadoop systems through Apache Drill. By visualizing data through Drill, users can perform their own reporting and data discovery for agile, on-the-fly decision-making.
+
+You can use JReport 13.1 and the Apache Drill JDBC Driver to easily extract data and visualize it, creating reports and dashboards that you can embed into your own applications. Complete the following simple steps to use Apache Drill with JReport:
+
+1. Install the Drill JDBC Driver with JReport.
+2. Create a new JReport Catalog to manage the Drill connection.
+3. Use JReport Designer to query the data and create a report.
+
+----------
+
+### Step 1: Install the Drill JDBC Driver with JReport
+
+Drill provides standard JDBC connectivity to integrate with JReport. JReport 13.1 requires Drill 1.0 or later.
+For general instructions on installing the Drill JDBC driver, see [Using JDBC]({{ site.baseurl }}/docs/using-the-jdbc-driver/).
+
+1. Locate the JDBC driver in the Drill installation directory on any node where Drill is installed on the cluster: 
+        <drill-home>/jars/jdbc-driver/drill-jdbc-all-<drill-version>.jar 
+   
+2. Copy the Drill JDBC driver into the JReport `lib` folder:
+        %REPORTHOME%\lib\
+   For example, on Windows, copy the Drill JDBC driver jar file into:
+   
+        C:\JReport\Designer\lib\drill-jdbc-all-1.0.0.jar
+    
+3.  Add the location of the JAR file to the JReport CLASSPATH variable. On Windows, edit the `C:\JReport\Designer\bin\setenv.bat` file:
+    ![drill query flow]({{ site.baseurl }}/docs/img/jreport_setenv.png)
+
+4. Verify that the JReport system can resolve the hostnames of the ZooKeeper nodes of the Drill cluster. You can do this by configuring DNS for all of the systems. Alternatively, you can edit the hosts file on the JReport system to include the hostnames and IP addresses of all the ZooKeeper nodes used with the Drill cluster.  For Linux systems, the hosts file is located at `/etc/hosts`. For Windows systems, the hosts file is located at `%WINDIR%\system32\drivers\etc\hosts`  Here is an example of a Windows hosts file: ![drill query flow]({{ site.baseurl }}/docs/img/jreport-hostsfile.png)
+
+----------
+
+### Step 2: Create a New JReport Catalog to Manage the Drill Connection
+
+1.  Click Create **New -> Catalog…**
+2.  Provide a catalog file name and click **…** to choose the file-saving location.
+3.  Click **View -> Catalog Browser**.
+4.  Right-click **Data Source 1** and select **Add JDBC Connection**.
+5.  Fill in the **Driver**, **URL**, **User**, and **Password** fields. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-catalogbrowser.png)
+6.  Click **Options** and select the **Qualifier** tab. 
+7.  In the **Quote Qualifier** section, choose **User Defined** and change the quote character from “ to ` (backtick). ![drill query flow]({{ site.baseurl }}/docs/img/jreport-quotequalifier.png)
+8.  Click **OK**. JReport will verify the connection and save all information.
+9.  Add tables and views to the JReport catalog by right-clicking the connection node and choosing **Add Table**. Now you can browse the schemas and add specific tables that you want to make available for building queries. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-addtable.png)
+10. Click **Done** when you have added all the tables you need. 
+
+
+### Step 3: Use JReport Designer
+
+1.  In the Catalog Browser, right-click **Queries** and select **Add Query…**
+2.  Define a JReport query by using the Query Editor. You can also import your own SQL statements. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-queryeditor.png)
+3.  Click **OK** to close the Query Editor, and click the **Save Catalog** button to save your progress to the catalog file. 
+    **Note**: If the report returns errors, you may need to edit the query and add the schema in front of the table name: `select column from schema.table_name` You can do this by clicking the **SQL** button on the Query Editor.
+
+4.  Use JReport Designer to query the data and create a report. ![drill query flow]({{ site.baseurl }}/docs/img/jreport-crosstab.png)
+    ![drill query flow]({{ site.baseurl }}/docs/img/jreport-crosstab2.png)
+    ![drill query flow]({{ site.baseurl }}/docs/img/jreport-crosstab3.png)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/query-data/010-query-data-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/010-query-data-introduction.md b/_docs/query-data/010-query-data-introduction.md
index 310a7b4..536dcc3 100644
--- a/_docs/query-data/010-query-data-introduction.md
+++ b/_docs/query-data/010-query-data-introduction.md
@@ -4,7 +4,7 @@ parent: "Query Data"
 ---
 You can query local and distributed file systems, Hive, HBase data, complex data, INFORMATION SCHEMA, and system tables as described in the subtopics of this section. 
 
-The query specifies the data source location and include data casting. 
+The query specifies the data source location and includes data casting. 
 
 ## Specifying the Data Source Location
 The optional [USE statement]({{site.baseurl}}/docs/use) runs subsequent queries against a particular [storage plugin]({{site.baseurl}}/docs/connect-a-data-source-introduction/). The USE statement typically saves typing some of the storage plugin information in the FROM statement. If you omit the USE statement, specify a storage plugin, such as dfs, and optionally a workspace, such as default, and a path to the data source using dot notation and back ticks. For example:
@@ -17,7 +17,7 @@ In some cases, Drill converts schema-less data to correctly-typed data implicitl
 For example, you have to cast a string `"100"` in a JSON file to an integer in order to apply a math function
 or an aggregate function.
 
-Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types, as described in section "[CONVERT_TO and CONVERT_FROM Usage Notes](/docs/data-type-conversion/#convert_to-and-convert_from-usage-notes)".
+To query HBase data using Drill, convert every column of an HBase table to/from byte arrays from/to an SQL data type as described in the section ["Querying HBase"]({{ site.baseurl}}/docs/querying-hbase/). Use [CONVERT_TO or CONVERT_FROM]({{ site.baseurl }}/docs//data-type-conversion/#convert_to-and-convert_from) functions to perform conversions of HBase data.
 
 ## Troubleshooting Queries
 

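A minimal sketch of the casting point above, assuming a JSON file at `/tmp/prices.json` with a string field `price`; both names are assumptions:

```
-- Hypothetical example of casting a JSON string field before applying math;
-- the file path and field name are assumptions.
SELECT CAST(t.price AS INT) + 10 AS adjusted_price
FROM dfs.`/tmp/prices.json` t;
```
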
http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/query-data/030-querying-hbase.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/030-querying-hbase.md b/_docs/query-data/030-querying-hbase.md
index f24846e..8b4009f 100644
--- a/_docs/query-data/030-querying-hbase.md
+++ b/_docs/query-data/030-querying-hbase.md
@@ -2,14 +2,88 @@
 title: "Querying HBase"
 parent: "Query Data"
 ---
-This exercise creates two tables in HBase, students and clicks, that you can query with Drill. You use the CONVERT_TO and CONVERT_FROM functions to convert binary text to readable output. You use the CAST function to convert the binary INT to readable output in step 4 of [Query HBase Tables]({{site.baseurl}}/docs/querying-hbase/#query-hbase-tables). When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the VARBINARY source/destination, use CAST.
+To use Drill to query HBase data, you need to understand how to work with the HBase byte arrays. If you want Drill to interpret the underlying HBase row key as something other than a byte array, you need to know the encoding of the data in HBase. By default, HBase stores data in little endian and Drill assumes the data is little endian, which is unsorted. The following table shows the sorting of typical rowkey IDs in bytes, encoded in little endian and big endian, respectively:
 
-## Create the HBase tables
+| IDs in Byte Notation Little Endian Sorting | IDs in Decimal Notation | IDs in Byte Notation Big Endian Sorting | IDs in Decimal Notation |
+|--------------------------------------------|-------------------------|-----------------------------------------|-------------------------|
+| 0 x 010000 . . . 000                       | 1                       | 0 x 010000 . . . 000                    | 1                       |
+| 0 x 010100 . . . 000                       | 17                      | 0 x 020000 . . . 000                    | 2                       |
+| 0 x 020000 . . . 000                       | 2                       | 0 x 030000 . . . 000                    | 3                       |
+| . . .                                      |                         | 0 x 040000 . . . 000                    | 4                       |
+| 0x 050000 . . . 000                        | 5                       | 0 x 050000 . . . 000                    | 5                       |
+| . . .                                      |                         | . . .                                   |                         |
+| 0 x 0A000000                               | 10                      | 0 x 0A0000 . . . 000                    | 10                      |
+|                                            |                         | 0 x 010100 . . . 000                    | 17                      |
+
+## Querying Big Endian-Encoded Data
+
+Drill optimizes scans of HBase tables when you use the ["CONVERT_TO and CONVERT_FROM data types"]({{ site.baseurl }}/docs/supported-data-types/#convert_to-and-convert_from-data-types) on big endian-encoded data. Drill provides the \*\_BE encoded types for use with CONVERT_TO and CONVERT_FROM to take advantage of these optimizations. Here are a few examples of the \*\_BE types.
+
+* DATE_EPOCH_BE  
+* TIME_EPOCH_BE  
+* TIMESTAMP_EPOCH_BE  
+* UINT8_BE  
+* BIGINT_BE  
+
+For example, Drill returns results efficiently when you use the following query on big endian-encoded data:
+
+```
+SELECT
+ CONVERT_FROM(BYTE_SUBSTR(row_key, 1, 8), 'DATE_EPOCH_BE') d
+, CONVERT_FROM(BYTE_SUBSTR(row_key, 9, 8), 'BIGINT_BE') id
+, CONVERT_FROM(tableName.f.c, 'UTF8') 
+ FROM hbase.`TestTableCompositeDate` tableName
+ WHERE
+ CONVERT_FROM(BYTE_SUBSTR(row_key, 1, 8), 'DATE_EPOCH_BE') < DATE '2015-06-18' AND
+ CONVERT_FROM(BYTE_SUBSTR(row_key, 1, 8), 'DATE_EPOCH_BE') > DATE '2015-06-13';
+```
+
+This query assumes that the row key of the table represents the DATE_EPOCH type encoded in big-endian format. The Drill HBase plugin will be able to prune the scan range since there is a condition on the big endian-encoded prefix of the row key. For more examples, see the [test code](https://github.com/apache/drill/blob/95623912ebf348962fe8a8846c5f47c5fdcf2f78/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseFilterPushDown.java).
+
+To query HBase data:
+
+1. Connect the data source to Drill using the [HBase storage plugin]({{site.baseurl}}/docs/hbase-storage-plugin/).  
+2. Determine the encoding of the HBase data you want to query. Ask the person in charge of creating the data.  
+3. Based on the encoding type of the data, use the ["CONVERT_TO and CONVERT_FROM data types"]({{ site.baseurl }}/docs/supported-data-types/#convert_to-and-convert_from-data-types) to convert HBase binary representations to an SQL type as you query the data.  
+    For example, use CONVERT_FROM in your Drill query to convert a big endian-encoded row key to an SQL BIGINT type:  
+
+    `SELECT CONVERT_FROM(BYTE_SUBSTR(row_key, 1, 8), 'BIGINT_BE') FROM my_hbase_table;`
+
+The [BYTE_SUBSTR function]({{ site.baseurl }}/docs/string-manipulation/#byte_substr) separates parts of an HBase composite key in this example. The Drill optimization is based on the capability, available in Drill 1.2 and later, to push conditional filters down to the storage layer when HBase data is in big endian format. 
+
+Drill can efficiently query HBase data that uses composite keys, as shown in the last example, if only the first component of the composite key is encoded in big endian format. If the HBase row key is not stored in big endian, do not use the \*\_BE types. To convert a little endian byte array to an integer, pass BIGINT instead of BIGINT_BE as the type argument to CONVERT_FROM. 
+
+## Leveraging HBase Ordered Byte Encoding
+
+Drill 1.2 leverages new features introduced by [HBASE-8201 Jira](https://issues.apache.org/jira/browse/HBASE-8201) that allow ordered byte encoding of different data types. This encoding scheme preserves the sort order of the native data type when the data is stored as sorted byte arrays on disk. Thus, Drill can process data through the HBase storage plugin if the row keys have been encoded in OrderedBytes format.
+
+When executing the following query, Drill prunes the scan range to include only the row keys in the [-32, 59) range, thus reducing the amount of data read.
+
+```
+SELECT
+ CONVERT_FROM(t.row_key, 'INT_OB') rk,
+ CONVERT_FROM(t.`f`.`c`, 'UTF8') val
+FROM
+  hbase.`TestTableIntOB` t
+WHERE
+  CONVERT_FROM(row_key, 'INT_OB') >= cast(-32 as INT) AND
+  CONVERT_FROM(row_key, 'INT_OB') < cast(59 as INT);
+```
+
+For more examples, see the [test code](https://github.com/apache/drill/blob/95623912ebf348962fe8a8846c5f47c5fdcf2f78/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseFilterPushDown.java).
+
+By taking advantage of ordered byte encoding, Drill 1.2 and later can efficiently execute conditional queries on HBase big endian data without a secondary index. 
+
+## Querying Little Endian-Encoded Data
+
+As mentioned earlier, HBase stores data in little endian by default and Drill assumes the data is encoded in little endian. This exercise involves working with data that is encoded in little endian. First, you create two tables in HBase, students and clicks, that you can query with Drill. You use the CONVERT_TO and CONVERT_FROM functions to convert binary text to/from typed data. You use the CAST function to convert the binary data to an INT in step 4 of [Query HBase Tables]({{site.baseurl}}/docs/querying-hbase/#query-hbase-tables). When you convert an INT or BIGINT number and the byte count of the source does not match the byte count of the binary destination (or vice versa), use CAST.
+
+### Create the HBase tables
 
 To create the HBase tables and start Drill, complete the following
 steps:
 
-1. Pipe the following commands to the HBase shell to create students and  clicks tables in HBase:
+1. Pipe the following commands to the HBase shell to create students and clicks tables in HBase:
   
           echo "create 'students','account','address'" | hbase shell
           echo "create 'clicks','clickinfo','iteminfo'" | hbase shell
@@ -82,15 +156,15 @@ steps:
         put 'clicks','click9','iteminfo:itemtype','image'
         put 'clicks','click9','iteminfo:quantity','10'
 
-4. Issue the following command to put the data into hbase:  
+4. Issue the following command to put the data into HBase:  
   
         cat testdata.txt | hbase shell
 
-## Query HBase Tables
+### Query HBase Tables
 1. Issue the following query to see the data in the students table:  
 
        SELECT * FROM students;
-   The query returns binary results:
+   The query returns results that are not usable. In the next step, you convert the data.
   
         +-------------+-----------------------+---------------------------------------------------------------------------+
         |  row_key    |  account              |                                address                                    |
@@ -102,9 +176,7 @@ steps:
         +-------------+-----------------------+---------------------------------------------------------------------------+
         4 rows selected (1.335 seconds)
 
-   The Drill output reflects the actual data type of the HBase data, which is binary.
-
-2. Issue the following query, that includes the CONVERT_FROM function, to convert the `students` table to readable data:
+2. Issue the following query, which includes the CONVERT_FROM function, to convert the `students` table to typed data:
 
          SELECT CONVERT_FROM(row_key, 'UTF8') AS studentid, 
                 CONVERT_FROM(students.account.name, 'UTF8') AS name, 
@@ -116,7 +188,7 @@ steps:
     {% include startnote.html %}Use dot notation to drill down to a column in an HBase table: tablename.columnfamilyname.columnnname{% include endnote.html %}
     
 
-    The query returns readable data:
+    The query returns results that look much better:
 
         +------------+------------+------------+------------------+------------+
         | studentid  |    name    |   state    |       street     |  zipcode   |

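A minimal sketch of the little endian case discussed above, assuming a row key that holds an 8-byte little endian integer; the table name and column family are assumptions:

```
-- Hypothetical little endian example: pass BIGINT (not BIGINT_BE) to
-- CONVERT_FROM because the row key bytes are little endian.
SELECT CONVERT_FROM(row_key, 'BIGINT')  AS id,
       CONVERT_FROM(t.`f`.`c`, 'UTF8')  AS val
FROM hbase.`TestTableLE` t;
```
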
http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/query-data/050-querying-hive.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/050-querying-hive.md b/_docs/query-data/050-querying-hive.md
index 515200a..8673525 100644
--- a/_docs/query-data/050-querying-hive.md
+++ b/_docs/query-data/050-querying-hive.md
@@ -16,7 +16,7 @@ To create a Hive table and query it with Drill, complete the following steps:
         hive> create table customers(FirstName string, LastName string, Company string, Address string, City string, County string, State string, Zip string, Phone string, Fax string, Email string, Web string) row format delimited fields terminated by ',' stored as textfile;
   3. Issue the following command to load the customer data into the customers table:  
 
-        hive> load data local inpath '/<directory path>/customers.csv' overwrite into table customers;`
+        hive> load data local inpath '/<directory path>/customers.csv' overwrite into table customers;
   4. Issue `quit` or `exit` to leave the Hive shell.
   5. Start the Drill shell. 
   6. Issue the following query to Drill to get the first and last names of the first ten customers in the Hive table:  
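
A sketch of a query along those lines, assuming the table lives in the default Hive schema; the exact statement in the documentation may differ:

```
-- Hypothetical query for step 6: first and last names of ten customers.
SELECT firstname, lastname
FROM hive.`customers`
LIMIT 10;
```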

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/rn/020-1.0.0-rn.md
----------------------------------------------------------------------
diff --git a/_docs/rn/020-1.0.0-rn.md b/_docs/rn/020-1.0.0-rn.md
index 73fd453..fc56991 100755
--- a/_docs/rn/020-1.0.0-rn.md
+++ b/_docs/rn/020-1.0.0-rn.md
@@ -2,7 +2,7 @@
 title: "Apache Drill 1.0.0 Release Notes"
 parent: "Release Notes"
 ---
- Today we're happy to announce the availability of Drill 1.0.0, providing additional enhancements and bug fixes. This release includes the following new features, enhancements, and bug fixes:
+ Today we're happy to announce the availability of Drill 1.0.0, providing additional enhancements and bug fixes. This release includes the following new features, enhancements, unresolved issues, and bug fixes:
     
 <h2>        Sub-task
 </h2>
@@ -25,7 +25,17 @@ parent: "Release Notes"
 </li>
 </ul>
                         
-<h2>        Bug
+<h2> Unresolved Issues
+</h2>
+<ul>
+    <li>[<a href='https://issues.apache.org/jira/browse/DRILL-1868'>DRILL-1868</a>] Aliases are not allowed in WHERE, HAVING and GROUP BY clauses. Drill should return an error when such aliases are encountered, but instead Drill returns an incorrect result. 
+    </li>
+    <li>[<a href='https://issues.apache.org/jira/browse/DRILL-2015'>DRILL-2015</a>] Casting a numeric value that does not fit the data type of the value and causes overflow returns an incorrect result. 
+    </li>
+    <li>[<a href='https://issues.apache.org/jira/browse/DRILL-2355'>DRILL-2355</a>] Drill output from the TRUNC function in some cases includes an extra .0 in the result. Drill binds TRUNC functions having two input parameters to the function holder. The output type of TRUNC functions is FLOAT8 when the input is FLOAT8, resulting in the extra .0. 
+    </li>
+</ul>
+<h2>        Bug Fixes
 </h2>
 <ul>
 <li>[<a href='https://issues.apache.org/jira/browse/DRILL-148'>DRILL-148</a>] -         Remove sandbox directory from source control, it is no longer utilized

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/sql-reference/data-types/010-supported-data-types.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/data-types/010-supported-data-types.md b/_docs/sql-reference/data-types/010-supported-data-types.md
index 1c1d13b..3acc6bd 100644
--- a/_docs/sql-reference/data-types/010-supported-data-types.md
+++ b/_docs/sql-reference/data-types/010-supported-data-types.md
@@ -39,6 +39,12 @@ To enable the DECIMAL type, set the `planner.enable_decimal_data_type` option to
     +-------+--------------------------------------------+
     1 row selected (0.08 seconds)
 
+## Disabling the DECIMAL Type
+
+By default, Drill disables the DECIMAL data type (an alpha feature), including casting to DECIMAL and reading DECIMAL types from Parquet and Hive. If you enabled the DECIMAL type by setting `planner.enable_decimal_data_type` = true, set the option to false to disable DECIMAL.
+
+When the DECIMAL type is disabled, you might see DECIMAL in the query plan, but Drill internally converts DECIMAL to NUMERIC.
+
 ## Composite Types
 
 Drill supports the following composite types:
@@ -70,7 +76,7 @@ changes in the data processing, Drill regenerates the code as necessary.
 
 ## Casting and Converting Data Types
 
-In Drill, you cast or convert data to the required type for moving data from one data source to another or to make the data readable.
+In Drill, you cast or convert data to the required type for moving data from one data source to another.
 You do not assign a data type to every column name in a CREATE TABLE statement to define the table as you do in database software. Instead, you use the CREATE TABLE AS (CTAS) statement with one or more of the following functions to define the table:
 
 * [CAST]({{ site.baseurl }}/docs/data-type-conversion#cast)    
@@ -81,7 +87,7 @@ You do not assign a data type to every column name in a CREATE TABLE statement t
 In some cases, Drill converts schema-less data to correctly-typed data implicitly. In this case, you do not need to cast the data. The file format of the data and the nature of your query determines the requirement for casting or converting. Differences in casting depend on the data source. The following list describes how Drill treats data types from various data sources:
 
 * HBase  
-  Does not implicitly cast input to SQL types. Convert data to appropriate types as shown in ["Querying HBase."]({{ site.baseurl }}/docs/querying-hbase/)
+  Does not implicitly cast input to SQL types. Convert data to appropriate types as described in the section ["Querying HBase"]({{ site.baseurl}}/docs/querying-hbase/). Use [CONVERT_TO or CONVERT_FROM data types]({{ site.baseurl }}/docs/data-type-conversion/#convert_to-and-convert_from).
 * Hive  
   Implicitly casts Hive types to SQL types as shown in the Hive [type mapping example]({{ site.baseurl }}/docs/hive-to-drill-data-type-mapping#type-mapping-example)
 * JSON  
@@ -125,8 +131,8 @@ In a textual file, such as CSV, Drill interprets every field as a VARCHAR, as pr
 
 * [CAST]({{ site.baseurl }}/docs/data-type-conversion#cast)  
   Casts data from one data type to another.
-* [CONVERT_TO and CONVERT_FROM]({{ site.baseurl }}/docs/data-type-conversion#convert_to-and-convert_from)  
-  Converts data, including binary data, from one data type to another.
+* CONVERT_TO and CONVERT_FROM functions  
+  Convert data, including binary data, from one data type to another using the ["CONVERT_TO and CONVERT_FROM data types"]({{ site.baseurl }}/docs/supported-data-types/#convert_to-and-convert_from-data-types).
 * [TO_CHAR]({{ site.baseurl }}/docs/data-type-conversion/#to_char)  
   Converts a TIMESTAMP, INTERVALDAY/INTERVALYEAR, INTEGER, DOUBLE, or DECIMAL to a string.
 * [TO_DATE]({{ site.baseurl }}/docs/data-type-conversion/#to_date)  
@@ -184,19 +190,12 @@ If your FIXEDBINARY or VARBINARY data is in a format other than UTF-8, or big-en
 
 \* Used to cast binary UTF-8 data coming to/from sources such as HBase. The CAST function does not support all representations of FIXEDBINARY and VARBINARY. Only the UTF-8 format is supported. 
 
-## CONVERT_TO and CONVERT_FROM
+## CONVERT_TO and CONVERT_FROM Data Types
 
-CONVERT_TO converts data to binary from the input type. CONVERT_FROM converts data from binary to the input type. For example, the following CONVERT_TO function converts an integer encoded using big endian to VARBINARY:
+The [CONVERT_TO function]({{site.baseurl}}/docs/data-type-conversion/#convert_to-and-convert_from) converts data to bytes from the input type. The [CONVERT_FROM function]({{site.baseurl}}/docs/data-type-conversion/#convert_to-and-convert_from) converts data from bytes to the input type. For example, the following CONVERT_TO function converts an integer to bytes using big endian encoding:
 
     CONVERT_TO(mycolumn, 'INT_BE')
 
-CONVERT_FROM and CONVERT_TO methods transform a known binary representation/encoding to a Drill internal format. 
-
-We recommend storing HBase data in a binary representation rather than
-a string representation. Use the \*\_BE types to store integer data types in a table such as HBase.  INT is a 4-byte integer encoded in little endian. INT_BE is a 4-byte integer encoded in big endian. The comparison order of \*\_BE encoded bytes is the same as the integer value itself if the bytes are unsigned or positive. Using a *_BE type facilitates scan range pruning and filter pushdown into HBase scan. 
-
-\*\_HADOOPV in the data type name denotes the variable length integer as defined by Hadoop libraries. Use a \*\_HADOOPV type if user data is encoded in this format by a Hadoop tool outside MapR.
-
 The following table lists the data types for use with the CONVERT_TO
 and CONVERT_FROM functions:
 
@@ -220,10 +219,14 @@ DATE_EPOCH_BE| bytes(8)| DATE
 DATE_EPOCH| bytes(8)| DATE  
 TIME_EPOCH_BE| bytes(8)| TIME  
 TIME_EPOCH| bytes(8)| TIME  
+TIMESTAMP_EPOCH| bytes(8)| DATE/TIME
 UTF8| bytes| VARCHAR  
 UTF16| bytes| VAR16CHAR  
 UINT8| bytes(8)| UINT8  
+UINT8_BE| bytes(8)| UINT8
 
-If you are unsure that the size of the source and destination INT or BIGINT you are converting is the same, use CAST to convert these data types to/from binary.
+This table includes types such as INT, for converting little endian-encoded data and types such as INT_BE for converting big endian-encoded data to Drill internal types. You need to convert binary representations, such as data in HBase, to a Drill internal format as you query the data. If you are unsure that the size of the source and destination INT or BIGINT you are converting is the same, use CAST to convert these data types to/from binary.  
+
+\*\_HADOOPV in the data type name denotes the variable length integer as defined by Hadoop libraries. Use a \*\_HADOOPV type if user data is encoded in this format by a Hadoop tool outside MapR.
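
Two short sketches of the points above; the SYSTEM scope and the literal value are illustrative choices, not text from the documentation:

```
-- Disable the alpha DECIMAL type using the option named earlier on this page.
ALTER SYSTEM SET `planner.enable_decimal_data_type` = false;

-- Round-trip a 4-byte little endian integer through the INT encoded type
-- listed in the CONVERT_TO/CONVERT_FROM table above.
SELECT CONVERT_FROM(CONVERT_TO(1, 'INT'), 'INT') AS roundtrip
FROM (VALUES(1));
```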
 
 

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/sql-reference/data-types/030-handling-different-data-types.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/data-types/030-handling-different-data-types.md b/_docs/sql-reference/data-types/030-handling-different-data-types.md
index ed1e0aa..56e01ef 100644
--- a/_docs/sql-reference/data-types/030-handling-different-data-types.md
+++ b/_docs/sql-reference/data-types/030-handling-different-data-types.md
@@ -3,7 +3,7 @@ title: "Handling Different Data Types"
 parent: "Data Types"
 ---
 ## Handling HBase Data
-To query HBase data in Drill, convert every column of an HBase table to/from byte arrays from/to an SQL data type using [CONVERT_TO or CONVERT_FROM]({{ site.baseurl }}/docs//data-type-conversion/#convert_to-and-convert_from) with one exception: When converting data represented as a string to an INT or BIGINT number, use CAST. Use [CAST]({{ site.baseurl }}/docs/data-type-conversion/#cast) to convert integers to/from HBase.
+To query HBase data using Drill, convert every column of an HBase table to/from byte arrays from/to an SQL data type as described in the section ["Querying HBase"]({{ site.baseurl}}/docs/querying-hbase/). Use [CONVERT_TO or CONVERT_FROM]({{ site.baseurl }}/docs//data-type-conversion/#convert_to-and-convert_from) functions to perform conversions of HBase data.
 
 ## Handling Textual Data
 In a textual file, such as CSV, Drill interprets every field as a VARCHAR, as previously mentioned. In addition to using the CAST function, you can also use TO_CHAR, TO_DATE, TO_NUMBER, and TO_TIMESTAMP. If the SELECT statement includes a WHERE clause that compares a column of an unknown data type, you might need to cast both the value of the column and the comparison value in the WHERE clause. In some cases, Drill performs implicit casting and no casting is necessary on your part.
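
A minimal sketch of the textual-data handling described above; the CSV path, column positions, and format strings are assumptions:

```
-- Hypothetical casts of VARCHAR fields from a CSV file to typed values.
SELECT CAST(columns[0] AS INT)             AS id,
       TO_DATE(columns[1], 'yyyy-MM-dd')   AS order_date,
       TO_NUMBER(columns[2], '######.##')  AS amount
FROM dfs.`/tmp/orders.csv`;
```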

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/sql-reference/sql-commands/035-partition-by-clause.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-commands/035-partition-by-clause.md b/_docs/sql-reference/sql-commands/035-partition-by-clause.md
index 5bf97d6..32dceb5 100644
--- a/_docs/sql-reference/sql-commands/035-partition-by-clause.md
+++ b/_docs/sql-reference/sql-commands/035-partition-by-clause.md
@@ -6,7 +6,7 @@ The PARTITION BY clause in the CTAS command partitions data, which Drill [prunes
 
 ## Syntax
 
-     [ PARTITION_BY ( column_name[, . . .] ) ]
+     [ PARTITION BY ( column_name[, . . .] ) ]
 
 The PARTITION BY clause partitions the data by the first column_name, and then subpartitions the data by the next column_name, if there is one, and so on. 
 
@@ -15,13 +15,13 @@ Only the Parquet storage format is supported for partitioning. Before using CTAS
 When the base table in the SELECT statement is schema-less, include columns in the PARTITION BY clause in the table's column list, or use a select all (SELECT *) statement:  
 
     CREATE TABLE dest_name [ (column, . . .) ]
-    [ PARTITION_BY (column, . . .) ] 
+    [ PARTITION BY (column, . . .) ] 
     AS SELECT column_list FROM <source_name>;
 
 When columns in the source table have ambiguous names, such as COLUMNS[0], define one or more column aliases in the SELECT statement. Use the alias name or names in the CREATE TABLE list. List aliases in the same order as the corresponding columns in the SELECT statement. Matching order is important because Drill performs an overwrite operation.  
 
     CREATE TABLE dest_name (alias1, alias2, . . .) 
-    [ PARTITION_BY (alias1, . . . ) ] 
+    [ PARTITION BY (alias1, . . . ) ] 
     AS SELECT column1 alias1, column2 alias2, . . .;
 
 For example:
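
A CTAS along these lines illustrates the syntax above; the workspace, file path, and column names are assumptions rather than the example given in the documentation:

```
-- Hypothetical CTAS that partitions the output Parquet files by region.
CREATE TABLE dfs.tmp.sales_by_region (region, amount)
PARTITION BY (region)
AS SELECT region, amount
FROM dfs.`/tmp/sales.parquet`;
```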

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/sql-reference/sql-functions/020-data-type-conversion.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/020-data-type-conversion.md b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
index f78a987..b0eb6aa 100644
--- a/_docs/sql-reference/sql-functions/020-data-type-conversion.md
+++ b/_docs/sql-reference/sql-functions/020-data-type-conversion.md
@@ -27,10 +27,11 @@ The target data type, such as INTEGER or DATE, to which to cast the expression
 
 ### CAST Usage Notes
 
-Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types with one exception: When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the VARBINARY source/destination, use CAST.  
+Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types.
 
-Refer to the following tables for information about the data types to use for casting:
+See the following tables for information about the data types to use for casting:
 
+* [CONVERT_TO and CONVERT_FROM Data Types]({{ site.baseurl }}/docs/supported-data-types/#convert_to-and-convert_from-data-types)
 * [Supported Data Types for Casting]({{ site.baseurl }}/docs/supported-data-types)
 * [Explicit Type Casting Maps]({{ site.baseurl }}/docs/supported-data-types/#explicit-type-casting-maps)
 
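For example, a small CAST sketch consistent with these notes, using literal values only:

    SELECT CAST('2015-09-04' AS DATE) AS d,
           CAST('123.45' AS DOUBLE)   AS n
    FROM (VALUES(1));
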
@@ -136,8 +137,7 @@ Because you cast the INTERVAL_col to INTERVAL SECOND, Drill returns the interval
 
 ## CONVERT_TO and CONVERT_FROM
 
-The CONVERT_TO and CONVERT_FROM functions encode and decode
-data to and from another data type.
+The CONVERT_TO and CONVERT_FROM functions convert binary data to/from Drill internal types based on the little or big endian encoding of the data.
 
 ### CONVERT_TO and CONVERT_FROM Syntax  
 
@@ -147,21 +147,22 @@ data to and from another data type.
 
 *column* is the name of a column Drill reads.
 
-*type* is one of the data types listed in the [CONVERT_TO/FROM Data Types]({{ site.baseurl }}/docs/data-types#convert_to-and-convert_from-data-types) table.
+*type* is one of the encoding types listed in the [CONVERT_TO/FROM Data Types]({{ site.baseurl }}/docs/data-types#convert_to-and-convert_from-data-types) table. 
 
 
 ### CONVERT_TO and CONVERT_FROM Usage Notes
 
-CONVERT_FROM and CONVERT_TO methods transform a known binary representation/encoding to a Drill internal format. Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types with one exception: When converting data represented as a string in HBase to an INT or BIGINT number, use CAST. CONVERT_TO/FROM functions work for data in a binary representation and are more efficient to use than CAST. For example, HBase stores
-data as encoded VARBINARY data. To read HBase data in Drill, convert every column of an HBase table *from* binary to an Drill internal type. To write HBase or Parquet binary data, convert SQL data *to* binary data and store the data in an HBase or Parquet while creating a table as a selection (CTAS).
+CONVERT_FROM and CONVERT_TO methods transform a known binary representation/encoding to a Drill internal format. Use CONVERT_TO and CONVERT_FROM instead of the CAST function for converting binary data types. CONVERT_TO/FROM functions work for data in a binary representation and are more efficient to use than CAST. 
+
+Drill can optimize scans of HBase tables when you use the \*\_BE encoded types shown in the ["CONVERT_TO and CONVERT_FROM Data Types"]({{ site.baseurl }}/docs/supported-data-types/#convert_to-and-convert_from-data-types) table on big-endian-encoded data. You need to use the HBase storage plugin and query data as described in ["Querying HBase"]({{ site.baseurl }}/docs/querying-hbase). To write Parquet binary data, convert SQL data *to* binary data and store it in a Parquet table using CREATE TABLE AS SELECT (CTAS).
 
 CONVERT_TO also converts a SQL data type to complex types, including HBase byte arrays, JSON and Parquet arrays, and maps. CONVERT_FROM converts from complex types, including HBase arrays, JSON and Parquet arrays, and maps, to a SQL data type. 
 
-Use the BINARY_STRING and STRING_BINARY custom Drill functions with CONVERT_TO and CONVERT_FROM to get meaningful results.
+You can use [STRING_BINARY]({{ site.baseurl }}/docs/data-type-conversion#string_binary-function) and [BINARY_STRING]({{ site.baseurl }}/docs/data-type-conversion#binary_string-function) custom Drill functions with CONVERT_TO and CONVERT_FROM to get meaningful results.
 
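A minimal sketch of that pairing, using only a literal input: STRING_BINARY renders the bytes that CONVERT_TO produces in a printable form.

    SELECT STRING_BINARY(CONVERT_TO(1, 'INT_BE')) AS int_bytes_be
    FROM (VALUES(1));
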
 ### Conversion of Data Types Examples
 
-This example shows how to use the CONVERT_FROM function to convert complex HBase data to a readable type. The example summarizes and continues the ["Query HBase"]({{ site.baseurl }}/docs/querying-hbase) example. The ["Query HBase"]({{ site.baseurl }}/docs/querying-hbase) example stores the following data in the students table on the Drill Sandbox:  
+This example shows how to use the CONVERT_FROM function to convert HBase data to a SQL type. The example summarizes and continues the ["Query HBase"]({{ site.baseurl }}/docs/querying-hbase) example. The ["Query HBase"]({{ site.baseurl }}/docs/querying-hbase) example stores the following data in the students table on the Drill Sandbox:  
 
     USE maprdb;
 
@@ -177,7 +178,7 @@ This example shows how to use the CONVERT_FROM function to convert complex HBase
     +-------------+---------------------+---------------------------------------------------------------------------+
     4 rows selected (1.335 seconds)
 
-You use the CONVERT_FROM function to decode the binary data to render it readable, selecting a data type to use from the [list of supported types]({{ site.baseurl }}/docs/data-type-conversion/#convert_to-and-convert_from-data-types). JSON supports strings. To convert binary to strings, use the UTF8 type.:
+You use the CONVERT_FROM function to decode the binary data, selecting a data type to use from the [list of supported types]({{ site.baseurl }}/docs/data-type-conversion/#convert_to-and-convert_from-data-types). JSON supports strings. To convert bytes to strings, use the UTF8 type:
 
     SELECT CONVERT_FROM(row_key, 'UTF8') AS studentid, 
            CONVERT_FROM(students.account.name, 'UTF8') AS name, 
@@ -195,7 +196,7 @@ You use the CONVERT_FROM function to decode the binary data to render it readabl
     +------------+------------+------------+------------------+------------+
     4 rows selected (0.504 seconds)
 
-This example converts from VARCHAR to a JSON map:
+This example converts VARCHAR data to a JSON map:
 
     SELECT CONVERT_FROM('{x:100, y:215.6}' ,'JSON') AS MYCOL FROM (VALUES(1));
     +----------------------+
@@ -205,7 +206,7 @@ This example converts from VARCHAR to a JSON map:
     +----------------------+
     1 row selected (0.163 seconds)
 
-This example uses a list of BIGINT as input and returns a repeated list of vectors:
+This example uses a list of BIGINT data as input and returns a repeated list of vectors:
 
     SELECT CONVERT_FROM('[ [1, 2], [3, 4], [5]]' ,'JSON') AS MYCOL1 FROM (VALUES(1));
     +------------+
@@ -231,7 +232,7 @@ This example assumes you are working in the Drill Sandbox. You modify the `dfs`
 
 1. Copy/paste the `dfs` storage plugin definition to a newly created plugin called myplugin.
 
-2. Change the root location to "/mapr/demo.mapr.com/tables". This change allows you to query tables for reading in the tables directory by workspace.table name. This change allows you to read a table in the `tables` directory. You can write a converted version of the table in the `tmp` directory because the writable property is true.
+2. Change the root location to "/mapr/demo.mapr.com/tables". After this change, you can read a table in the `tables` directory. You can write a converted version of the table in the `tmp` directory because the writable property is true.
 
         {
           "type": "file",
@@ -361,7 +362,7 @@ First, you set the storage format to JSON. Next, you use the CREATE TABLE AS (CT
         +-------------+-------------+-------------+-------------+-------------+
         4 rows selected (0.12 seconds)
 
-9. Use CONVERT_FROM to convert the Parquet data to a readable format:
+9. Use CONVERT_FROM to read the Parquet data:
 
         SELECT CONVERT_FROM(id, 'UTF8') AS id, 
                CONVERT_FROM(name, 'UTF8') AS name, 

http://git-wip-us.apache.org/repos/asf/drill/blob/de783c42/_docs/sql-reference/sql-functions/040-string-manipulation.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/040-string-manipulation.md b/_docs/sql-reference/sql-functions/040-string-manipulation.md
index 1245451..3993824 100644
--- a/_docs/sql-reference/sql-functions/040-string-manipulation.md
+++ b/_docs/sql-reference/sql-functions/040-string-manipulation.md
@@ -232,7 +232,7 @@ Returns the location of a substring.
 
 ## REGEXP_REPLACE
 
-Substitutes new text for substrings that match [POSIX regular expression patterns](http://www.regular-expressions.info/posix.html).
+Substitutes new text for substrings that match [Java regular expression patterns](http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html).
 
 ### REGEXP_REPLACE Syntax
 

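A short sketch of the Java-regex substitution described above, using literal strings only:

    SELECT REGEXP_REPLACE('abc acd ade', 'a.c', 'XYZ') AS replaced
    FROM (VALUES(1));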

[3/4] drill git commit: hide rowkey filter pushdown

Posted by br...@apache.org.
hide rowkey filter pushdown


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/da949f12
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/da949f12
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/da949f12

Branch: refs/heads/gh-pages
Commit: da949f12b658542fbf2f8a961ce38797d51d3154
Parents: dae0b0c
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Fri Sep 4 17:53:10 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Fri Sep 4 18:32:28 2015 -0700

----------------------------------------------------------------------
 _docs/query-data/030-querying-hbase.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/da949f12/_docs/query-data/030-querying-hbase.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/030-querying-hbase.md b/_docs/query-data/030-querying-hbase.md
index 8b4009f..da7cb85 100644
--- a/_docs/query-data/030-querying-hbase.md
+++ b/_docs/query-data/030-querying-hbase.md
@@ -2,7 +2,7 @@
 title: "Querying HBase"
 parent: "Query Data"
 ---
-To use Drill to query HBase data, you need to understand how to work with the HBase byte arrays. If you want Drill to interpret the underlying HBase row key as something other than a byte array, you need to know the encoding of the data in HBase. By default, HBase stores data in little endian and Drill assumes the data is little endian, which is unsorted. The following table shows the sorting of typical rowkey IDs in bytes, encoded in little endian and big endian, respectively:
+<!-- To use Drill to query HBase data, you need to understand how to work with the HBase byte arrays. If you want Drill to interpret the underlying HBase row key as something other than a byte array, you need to know the encoding of the data in HBase. By default, HBase stores data in little endian and Drill assumes the data is little endian, which is unsorted. The following table shows the sorting of typical rowkey IDs in bytes, encoded in little endian and big endian, respectively:
 
 | IDs in Byte Notation Little Endian Sorting | IDs in Decimal Notation | IDs in Byte Notation Big Endian Sorting | IDs in Decimal Notation |
 |--------------------------------------------|-------------------------|-----------------------------------------|-------------------------|
@@ -76,7 +76,7 @@ By taking advantage of ordered byte encoding, Drill 1.2 and later can performant
 
 ## Querying Little Endian-Encoded Data
 
-As mentioned earlier, HBase stores data in little endian by default and Drill assumes the data is encoded in little endian. This exercise involves working with data that is encoded in little endian. First, you create two tables in HBase, students and clicks, that you can query with Drill. You use the CONVERT_TO and CONVERT_FROM functions to convert binary text to/from typed data. You use the CAST function to convert the binary data to an INT in step 4 of [Query HBase Tables]({{site.baseurl}}/docs/querying-hbase/#query-hbase-tables). When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the binary source/destination, use CAST.
+As mentioned earlier,  -->HBase stores data in little endian by default and Drill assumes the data is encoded in little endian. This exercise involves working with data that is encoded in little endian. First, you create two tables in HBase, students and clicks, that you can query with Drill. You use the CONVERT_TO and CONVERT_FROM functions to convert binary text to/from typed data. You use the CAST function to convert the binary data to an INT in step 4 of [Query HBase Tables]({{site.baseurl}}/docs/querying-hbase/#query-hbase-tables). When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the binary source/destination, use CAST.
 
 ### Create the HBase tables
 

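A hedged sketch of the conversions that paragraph describes; the storage plugin, table, and column names below are hypothetical stand-ins for the students and clicks tables created in the exercise:

    SELECT CONVERT_FROM(t.row_key, 'UTF8') AS studentid,
           CAST(t.account.score AS INT)    AS score
    FROM hbase.students t;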

[4/4] drill git commit: fix comment

Posted by br...@apache.org.
fix comment

DRILL-3704


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/be2f2223
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/be2f2223
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/be2f2223

Branch: refs/heads/gh-pages
Commit: be2f2223449316bbf478e6b99cab8b1f74802d07
Parents: da949f1
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Sat Sep 5 09:06:57 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Sat Sep 5 11:15:22 2015 -0700

----------------------------------------------------------------------
 _docs/query-data/030-querying-hbase.md |  7 ++++---
 _docs/sql-reference/040-operators.md   | 28 ++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/be2f2223/_docs/query-data/030-querying-hbase.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/030-querying-hbase.md b/_docs/query-data/030-querying-hbase.md
index da7cb85..474d1fe 100644
--- a/_docs/query-data/030-querying-hbase.md
+++ b/_docs/query-data/030-querying-hbase.md
@@ -2,7 +2,8 @@
 title: "Querying HBase"
 parent: "Query Data"
 ---
-<!-- To use Drill to query HBase data, you need to understand how to work with the HBase byte arrays. If you want Drill to interpret the underlying HBase row key as something other than a byte array, you need to know the encoding of the data in HBase. By default, HBase stores data in little endian and Drill assumes the data is little endian, which is unsorted. The following table shows the sorting of typical rowkey IDs in bytes, encoded in little endian and big endian, respectively:
+<!-- 
+To use Drill to query HBase data, you need to understand how to work with the HBase byte arrays. If you want Drill to interpret the underlying HBase row key as something other than a byte array, you need to know the encoding of the data in HBase. By default, HBase stores data in little endian and Drill assumes the data is little endian, which is unsorted. The following table shows the sorting of typical rowkey IDs in bytes, encoded in little endian and big endian, respectively:
 
 | IDs in Byte Notation Little Endian Sorting | IDs in Decimal Notation | IDs in Byte Notation Big Endian Sorting | IDs in Decimal Notation |
 |--------------------------------------------|-------------------------|-----------------------------------------|-------------------------|
@@ -75,8 +76,8 @@ For more examples, see the [test code:](https://github.com/apache/drill/blob/956
 By taking advantage of ordered byte encoding, Drill 1.2 and later can performantly execute conditional queries without a secondary index on HBase big endian data. 
 
 ## Querying Little Endian-Encoded Data
-
-As mentioned earlier,  -->HBase stores data in little endian by default and Drill assumes the data is encoded in little endian. This exercise involves working with data that is encoded in little endian. First, you create two tables in HBase, students and clicks, that you can query with Drill. You use the CONVERT_TO and CONVERT_FROM functions to convert binary text to/from typed data. You use the CAST function to convert the binary data to an INT in step 4 of [Query HBase Tables]({{site.baseurl}}/docs/querying-hbase/#query-hbase-tables). When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the binary source/destination, use CAST.
+ -->
+As mentioned earlier, HBase stores data in little endian by default and Drill assumes the data is encoded in little endian. This exercise involves working with data that is encoded in little endian. First, you create two tables in HBase, students and clicks, that you can query with Drill. You use the CONVERT_TO and CONVERT_FROM functions to convert binary text to/from typed data. You use the CAST function to convert the binary data to an INT in step 4 of [Query HBase Tables]({{site.baseurl}}/docs/querying-hbase/#query-hbase-tables). When converting an INT or BIGINT number, having a byte count in the destination/source that does not match the byte count of the number in the binary source/destination, use CAST.
 
 ### Create the HBase tables
 

http://git-wip-us.apache.org/repos/asf/drill/blob/be2f2223/_docs/sql-reference/040-operators.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/040-operators.md b/_docs/sql-reference/040-operators.md
index 4eb83f9..a502f01 100644
--- a/_docs/sql-reference/040-operators.md
+++ b/_docs/sql-reference/040-operators.md
@@ -13,6 +13,7 @@ You can use the following logical operators in your Drill queries:
   * BETWEEN
   * IN
   * LIKE
+  * ILIKE
   * NOT
   * OR 
 
@@ -38,6 +39,7 @@ You can use the following comparison operators in your Drill queries:
 You can use the following pattern matching operators in your Drill queries:
 
   * LIKE
+  * ILIKE
   * NOT LIKE
   * SIMILAR TO
   * NOT SIMILAR TO
@@ -73,3 +75,29 @@ The concatenate operator is an alternative to the [concat function]({{ site.base
 
 The concat function treats NULL as an empty string. The concatenate operator (||) returns NULL if any input is NULL.
 
+## Operator Precedence 
+
+The following table shows the precedence of operators in decreasing order:
+
+| Operator/Element                     | Associativity | Description                                                 |
+|--------------------------------------|---------------|-------------------------------------------------------------|
+| .                                    | left          | dot notation used, for example, to drill down in a JSON map |
+| [ ]                                  | left          | array-style notation to drill down into a JSON array        |
+| -                                    | right         | unary minus                                                 |
+| E                                    | left          | exponentiation                                              |
+| * / %                                | left          | multiplication, division, modulo                            |
+| + -                                  | left          | addition, subtraction                                       |
+| IS                                   |               | IS TRUE, IS FALSE, IS UNKNOWN, IS NULL                      |
+| IS NULL                              |               | test for null                                               |
+| IS NOT NULL                          |               | test for not null                                           |
+| (any other)                          | left          | all other native and user-defined operators                 |
+| IN                                   |               | set membership                                              |
+| BETWEEN                              |               | range containment                                           |
+| OVERLAPS                             |               | time interval overlap                                       |
+| LIKE ILIKE SIMILAR TO NOT SIMILAR TO |               | string pattern matching                                     |
+| < >                                  |               | less than, greater than                                     |
+| =                                    | right         | equality, assignment                                        |
+| NOT                                  | right         | logical negation                                            |
+| AND                                  | left          | logical conjunction                                         |
+| OR                                   | left          | logical disjunction                                         |
+

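A quick illustration of the ILIKE operator these lists add, assuming a hypothetical JSON file with a name field and that ILIKE behaves as a case-insensitive LIKE:

    SELECT name
    FROM dfs.`/tmp/names.json`
    WHERE name ILIKE 'sam%';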

[2/4] drill git commit: json file

Posted by br...@apache.org.
json file

correct name of function

unhide parquet metadata feature

DRILL-3645

DRILL-3516

partition_by to partition by

DRILL-3697

hide parquet metadata

1.0 unresolved issues

minor edits

added the bit Andries suggested about 2 odbc.ini

corrected links

fix links ODBC install for Mac

rowkey filter pushdown

DRILL-3364/3492

DRILL-3056

DRILL-3056 fix minor edit

DRILL-3056 edit

DRILL-3148 and Smidth's review changes

json doc to add jReport doc


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/dae0b0c0
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/dae0b0c0
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/dae0b0c0

Branch: refs/heads/gh-pages
Commit: dae0b0c0b5e1b06257e6da0703f2a1427efe3b8a
Parents: de783c4
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Fri Aug 14 18:21:23 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Fri Sep 4 18:32:28 2015 -0700

----------------------------------------------------------------------
 _data/docs.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/dae0b0c0/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 85e50a2..5a3ca09 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -1,4 +1,4 @@
-{
+{  
     "by_title": {
         "2014 Q1 Drill Report": {
             "breadcrumbs": [