You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@drill.apache.org by ts...@apache.org on 2015/05/20 08:06:18 UTC

[01/14] drill git commit: release notes, troubleshooting

Repository: drill
Updated Branches:
  refs/heads/gh-pages 9c99ecfb7 -> 0120d252a


release notes, troubleshooting


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/2c0fb63a
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/2c0fb63a
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/2c0fb63a

Branch: refs/heads/gh-pages
Commit: 2c0fb63a700300c58a2fc042c92926e17b5aac6f
Parents: 00bed1a
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 11:43:05 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 11:43:05 2015 -0700

----------------------------------------------------------------------
 _data/docs.json               |  96 +++++--
 _docs/110-troubleshooting.md  | 276 +++++++++++++++++++++
 _docs/rn/070-0.9.0-rn copy.md |  29 +++
 _docs/rn/070-0.9.0-rn.md      |  29 ---
 _docs/rn/080-1.0.0-rn.md      | 495 +++++++++++++++++++++++++++++++++++++
 5 files changed, 877 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/2c0fb63a/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index cd651e9..7c6e8de 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -324,9 +324,9 @@
             "next_title": "Sample Datasets", 
             "next_url": "/docs/sample-datasets/", 
             "parent": "Release Notes", 
-            "previous_title": "Apache Drill 0.8.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-            "relative_path": "_docs/rn/070-0.9.0-rn.md", 
+            "previous_title": "Apache Drill 0.9.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
+            "relative_path": "_docs/rn/080-1.0.0-rn.md", 
             "title": "Apache Drill 0.9.0 Release Notes", 
             "url": "/docs/apache-drill-0-9-0-release-notes/"
         }, 
@@ -1713,8 +1713,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Developer Information", 
-            "next_url": "/docs/developer-information/", 
+            "next_title": "Troubleshooting", 
+            "next_url": "/docs/troubleshooting/", 
             "parent": "Develop Custom Functions", 
             "previous_title": "Using Custom Functions in Queries", 
             "previous_url": "/docs/using-custom-functions-in-queries/", 
@@ -2259,8 +2259,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Developer Information", 
-                    "next_url": "/docs/developer-information/", 
+                    "next_title": "Troubleshooting", 
+                    "next_url": "/docs/troubleshooting/", 
                     "parent": "Develop Custom Functions", 
                     "previous_title": "Using Custom Functions in Queries", 
                     "previous_url": "/docs/using-custom-functions-in-queries/", 
@@ -2622,8 +2622,8 @@
             "next_title": "Develop Drill", 
             "next_url": "/docs/develop-drill/", 
             "parent": "", 
-            "previous_title": "Custom Function Interfaces", 
-            "previous_url": "/docs/custom-function-interfaces/", 
+            "previous_title": "Troubleshooting", 
+            "previous_url": "/docs/troubleshooting/", 
             "relative_path": "_docs/120-developer-information.md", 
             "title": "Developer Information", 
             "url": "/docs/developer-information/"
@@ -6789,12 +6789,29 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Sample Datasets", 
-                    "next_url": "/docs/sample-datasets/", 
+                    "next_title": "Apache Drill 0.9.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
+                    "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
+                    "title": "Apache Drill 0.9.0 Release Notes", 
+                    "url": "/docs/apache-drill-0-9-0-release-notes/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Release Notes", 
+                            "url": "/docs/release-notes/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Sample Datasets", 
+                    "next_url": "/docs/sample-datasets/", 
+                    "parent": "Release Notes", 
+                    "previous_title": "Apache Drill 0.9.0 Release Notes", 
+                    "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
+                    "relative_path": "_docs/rn/080-1.0.0-rn.md", 
                     "title": "Apache Drill 0.9.0 Release Notes", 
                     "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }
@@ -9376,6 +9393,18 @@
             "title": "Testing the ODBC Connection", 
             "url": "/docs/testing-the-odbc-connection/"
         }, 
+        "Troubleshooting": {
+            "breadcrumbs": [], 
+            "children": [], 
+            "next_title": "Developer Information", 
+            "next_url": "/docs/developer-information/", 
+            "parent": "", 
+            "previous_title": "Custom Function Interfaces", 
+            "previous_url": "/docs/custom-function-interfaces/", 
+            "relative_path": "_docs/110-troubleshooting.md", 
+            "title": "Troubleshooting", 
+            "url": "/docs/troubleshooting/"
+        }, 
         "Tutorials": {
             "breadcrumbs": [], 
             "children": [
@@ -13749,8 +13778,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Developer Information", 
-                    "next_url": "/docs/developer-information/", 
+                    "next_title": "Troubleshooting", 
+                    "next_url": "/docs/troubleshooting/", 
                     "parent": "Develop Custom Functions", 
                     "previous_title": "Using Custom Functions in Queries", 
                     "previous_url": "/docs/using-custom-functions-in-queries/", 
@@ -13770,6 +13799,18 @@
         }, 
         {
             "breadcrumbs": [], 
+            "children": [], 
+            "next_title": "Developer Information", 
+            "next_url": "/docs/developer-information/", 
+            "parent": "", 
+            "previous_title": "Custom Function Interfaces", 
+            "previous_url": "/docs/custom-function-interfaces/", 
+            "relative_path": "_docs/110-troubleshooting.md", 
+            "title": "Troubleshooting", 
+            "url": "/docs/troubleshooting/"
+        }, 
+        {
+            "breadcrumbs": [], 
             "children": [
                 {
                     "breadcrumbs": [
@@ -14018,8 +14059,8 @@
             "next_title": "Develop Drill", 
             "next_url": "/docs/develop-drill/", 
             "parent": "", 
-            "previous_title": "Custom Function Interfaces", 
-            "previous_url": "/docs/custom-function-interfaces/", 
+            "previous_title": "Troubleshooting", 
+            "previous_url": "/docs/troubleshooting/", 
             "relative_path": "_docs/120-developer-information.md", 
             "title": "Developer Information", 
             "url": "/docs/developer-information/"
@@ -14154,12 +14195,29 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Sample Datasets", 
-                    "next_url": "/docs/sample-datasets/", 
+                    "next_title": "Apache Drill 0.9.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
-                    "relative_path": "_docs/rn/070-0.9.0-rn.md", 
+                    "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
+                    "title": "Apache Drill 0.9.0 Release Notes", 
+                    "url": "/docs/apache-drill-0-9-0-release-notes/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Release Notes", 
+                            "url": "/docs/release-notes/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Sample Datasets", 
+                    "next_url": "/docs/sample-datasets/", 
+                    "parent": "Release Notes", 
+                    "previous_title": "Apache Drill 0.9.0 Release Notes", 
+                    "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
+                    "relative_path": "_docs/rn/080-1.0.0-rn.md", 
                     "title": "Apache Drill 0.9.0 Release Notes", 
                     "url": "/docs/apache-drill-0-9-0-release-notes/"
                 }

http://git-wip-us.apache.org/repos/asf/drill/blob/2c0fb63a/_docs/110-troubleshooting.md
----------------------------------------------------------------------
diff --git a/_docs/110-troubleshooting.md b/_docs/110-troubleshooting.md
new file mode 100644
index 0000000..eb6f98b
--- /dev/null
+++ b/_docs/110-troubleshooting.md
@@ -0,0 +1,276 @@
+---
+title: "Troubleshooting"
+---
+
+You may experience certain known issues when using Drill. This document lists some known issues and resolutions for each.
+
+## Before You Begin
+Before you begin troubleshooting issues that you encounter in Drill, make sure you know which Drillbit is acting as the Foreman in the Drill cluster. The Drill node to which a client or application connects is the Foreman. 
+
+You should also know the version of Drill running in the cluster. You can search JIRA for issues related to the version to see if a patch or workaround exists for the issue that you encountered.
+
+### Identify the Foreman
+Issue the following query to identify the node running as the Foreman:
+SELECT host FROM sys.drillbits WHERE `current` = true;
+
+### Identify the Drill Version
+Issue the following query to identify the version of Drill running in your cluster:
+SELECT commit_id FROM sys.version;
+
+### Enable Verbose Errors
+You can enable the verbose errors option for a more detailed error print-out.
+
+Issue the following command to enable the verbose errors option:  
+
+       ALTER SESSION SET `exec.errors.verbose` = true
+
+## Troubleshooting
+If you have any issues in Drill, search the following list for your issue and apply the suggested solution:
+
+**Query Parsing Errors**  
+Symptom:  
+
+       PARSE ERROR: At line x, column x: ...
+Solution: Verify that you are using valid syntax. See [SQL Reference]({{ site.baseurl }}/docs/sql-reference-introduction/).
+If you are using common words, they may be reserved words.  Make sure to use backticks to quote reserved words.
+Confirm that you are using back ticks to quote identifiers when using special characters such as back slashes or periods from a file path.
+
+**Reserved Words**  
+Symptom:   
+
+       select count from dfs.drill.`test2.json`;
+       Query failed: SYSTEM ERROR: Failure parsing SQL. Encountered "count from" at line 1, column 8.
+       Was expecting one of:
+           "UNION" ...
+           "INTERSECT" ...
+Solution: Fix with correct syntax. See [Reserved Keywords]({{ site.baseurl }}/docs/reserved-keywords/).
+
+       select `count` from dfs.drill.`test2.json`;  
+
+**Tables not found**  
+Symptom:
+ 
+       select * from dfs.drill.test2.json;
+       Query failed: PARSE ERROR: From line 1, column 15 to line 1, column 17: Table 'dfs.drill.test2.json' not found  
+
+Solutions:
+
+1. Run SHOW FILES to list the files in the dfs.drill workspace. 
+2. Check the permissions of the files against those for the Drill user.  
+3. Verify backticks added for file name: select * from dfs.drill.`test2.json`;  
+4. Drill may not be able to determine the type of file you are trying to read. Try using Drill Default Input Format.  
+5. Verify that your storage plugin is correctly configured.
+6. Verify that Drill can auto-detect your file format.  Drill supports auto-detection for the following formats:  
+ * CSV
+ * TSV
+ * PSV
+ * Parquet
+ * JSON
+
+**Access nested fields without table name/alias**  
+Symptom: 
+
+       select x.y …  
+       PARSE ERROR: At line 1, column 8: Table 'x' not found  
+Solution: Add table name or alias to the field reference:  
+
+       select t.x.y from t  
+
+**Unexpected null values for columns in results**  
+Symptom:  The following type of query returns NULL values:  
+
+       select t.price from t 
+
+
+Solution: Drill is a schema-less system. Verify that column names are typed correctly.
+
+
+**Using functions with incorrect data types**  
+
+Symptom: Example  
+
+       select trunc(c3) from t3;
+       
+       0: jdbc:drill:schema=dfs> select trunc(c3) from t3;
+       Query failed: SYSTEM ERROR: Failure while trying to materialize incoming schema.  Errors:
+        
+       Error in expression at index -1.  Error: Missing function implementation: [trunc(DATE-OPTIONAL)].  Full expression: --UNKNOWN EXPRESSION--..
+       
+       Fragment 0:0
+       
+       [6e465594-4d83-4042-b88d-50e7eb207484 on atsqa4-133.qa.lab:31010]
+       Error: exception while executing query: Failure while executing query. (state=,code=0)  
+
+Solution: Ensure that the function is invoked with the correct data type parameters. In the example above, c3 is an unsupported date type. 
+
+**Query takes a long time to return** 
+
+Symptom: Query takes longer to return than expected.
+
+Solution: Review the [query profile]({{ site.baseurl }}/docs/query-profiles/) and:  
+
+ * Determine whether progress is being made (look at last update and last change times).
+ * Look at where Drill is currently spending time and try to optimize those operations.
+ * Confirm that Drill is taking advantage of the nature of your data, including things like partition pruning and projection pushdown.
+
+**Schema changes**  
+
+Symptom:  
+
+       DATA_READ ERROR: Error parsing JSON - You tried to write a XXXX type when you are using a ValueWriter of type XXXX.       
+       File  /src/data/schema.json
+       Record  2
+       Fragment 0:0  
+
+Solution: Drill does not fully support schema changes.  In this case, you will need to either ensure that your schemas are the same or only select columns that share schema.
+
+**Timestamps and Timezones other than UTC**  
+
+Symptoms: Issues with timestamp and timezone. Illegal instant due to time zone offset transition (America/New_York)
+
+Solution: Convert data to UTC format. You are most likely trying to import date and time data that is encoded in one timezone into a system set to a different timezone.  Drill’s default behavior is to use the system’s time for converting incoming data.  If you are providing UTC data and your Drillbit nodes do not run with UTC time, you’ll need to run your JVM with the following system property:
+
+     -Duser.timezone=UTC  
+
+For more information, see `http://www.openkb.info/2015/05/understanding-drills-timestamp-and.html`  
+
+**Unexpected ODBC issues**  
+
+Symptom: ODBC errors.
+
+Solution: Make sure that the ODBC driver version is compatible with the server version. 
+Turn on ODBC driver debug logging to better understand failure.  
+
+**Connectivity issues when connecting via ZooKeeper for JDBC/ODBC**  
+
+Symptom: Client cannot resolve ZooKeeper host names for JDBC/ODBC.
+
+Solution: Ensure that Zookeeper is up and running. Verify that Drill has the correct drill-override.conf settings for the Zookeeper quorum.
+
+**Metadata queries take a long time to return**  
+
+Symptom: Running SHOW databases/schemas/tables hangs (in general any information_schema queries hang).
+
+Solution: Disable incorrectly configured storage plugins or start appropriate services. Check compatibility matrix for the appropriate versions.  
+
+**Unexpected results due to implicit casting**  
+
+Symptom: Drill implicitly casts based on order of precedence.
+
+Solution: Review Drill casting behaviors and explicitly cast for the expected results. See [Data Types]({{ site.baseurl }}/docs/handling-different-data-types/).
+
+**Column alias causes an error**  
+
+Symptom: Drill is not case sensitive, and you can provide any alias for a column name. However, if the storage type is case sensitive, the alias name may conflict and cause errors.
+
+Solution: Verify that the column alias does not conflict with the storage type. See [Lexical Structures]({{ site.baseurl }}/docs/lexical-structure/#case-sensitivity).  
+
+**List (arrays) contains null**  
+
+Symptom: UNSUPPORTED\_OPERATION ERROR: Null values are not supported in lists by default. Please set store.json.all\_text_mode to true to read lists containing nulls. Be advised that this will treat JSON null values as a string containing the word 'null'.
+
+Solution: Change Drill session settings to enable all_text_mode per message.  
+Avoid selecting fields that are arrays containing nulls.
+
+**SELECT COUNT (\*) takes a long time to run**  
+
+Solution: In some cases, the underlying storage format does not have a built-in capability to return a count of records in a table.  In these cases, Drill will do a full scan of the data to verify the number of records.
+
+**Tableau issues**  
+
+Symptom: You see a lot of error messages in ODBC trace files or the performance is slow.
+
+Solution: Verify that you have installed the TDC file shipped with the ODBC driver.  
+
+**Group by using alias**  
+
+Symptom: Invalid column.
+
+Solution: Not supported. Use column name and/or expression directly.  
+
+**Casting a Varchar string to an integer results in an error**  
+
+Symptom: 
+
+       SYSTEM ERROR: java.lang.NumberFormatException
+
+Solution: Per the ANSI SQL specification CAST to INT does not support empty strings.  If you want to change this behavior, you can set Drill to use the cast empty string to null behavior.  This can be done using the drill.exec.functions.cast_empty_string_to_null SESSION/SYSTEM option. 
+ 
+**Unexpected exception during fragment initialization**  
+
+Symptom: The error occurred during the Foreman phase of the query. The error typically occurs due to the following common causes:  
+
+* Malformed SQL that passed initial validation but failed upon further analysis
+* Extraneous files in query directories do not match the default format type
+
+Solution: Enable the verbose errors option and run the query again to see if further insight is provided.  
+
+**Queries running out of memory**  
+
+Symptom: 
+
+       RESOURCE ERROR: One or more nodes ran out of memory while executing the query.
+
+Solution:  
+
+* Increase the amount of direct memory allotted to Drill
+* If using CTAS, reduce the planner.width.max_per_node setting
+* Reduce the number of concurrent queries running on the cluster using Drill query queues
+* Disable hash aggregation and hash sort for your session
+* See [Configuration Options]({{ site.baseurl }}/docs/configuration-options-introduction/)  
+
+**Unclear Error Message**  
+
+Symptom: Cannot determine issue from error message.
+
+Solution: Turn on verbose errors. 
+
+       alter session set `exec.errors.verbose`=true;
+
+Determine your currently connected drillbit using select * from sys.drillbits.  Then review the Drill logs from that drillbit.
+
+**SQLLine error starting Drill in embedded mode**  
+
+Symptom:  
+
+       java.net.BindException: Address already in use  
+
+Solution:  You can only run one Drillbit per node (embedded or daemon) using default settings.  You need to either change the ports used by the Drillbit or stop one Drillbit before starting another.
+ 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+       
+
+
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/2c0fb63a/_docs/rn/070-0.9.0-rn copy.md
----------------------------------------------------------------------
diff --git a/_docs/rn/070-0.9.0-rn copy.md b/_docs/rn/070-0.9.0-rn copy.md
new file mode 100755
index 0000000..edae5c8
--- /dev/null
+++ b/_docs/rn/070-0.9.0-rn copy.md	
@@ -0,0 +1,29 @@
+---
+title: "Apache Drill 0.9.0 Release Notes"
+parent: "Release Notes"
+---
+It has been about a month since the release of Drill 0.8, which included [more than 240 improvements]({{ site.baseurl }}/blog/drill-0.8-released/). Today we're happy to announce the availability of Drill 0.9, providing additional enhancements and bug fixes. In fact, this release includes [200 resolved JIRAs](https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12328813). Some of the noteworthy features in Drill 0.9 are:
+
+* **Authentication** ([DRILL-2674](https://issues.apache.org/jira/browse/DRILL-2674)). Drill now supports username/password authentication through the Java and C++ clients, as well as JDBC and ODBC. On the server-side, Drill leverages Linux PAM to securely validate the credentials. Users can choose to use an external user directory such as Active Directory or LDAP. To enable authentication, set the `security.user.auth` option in `drill-override.conf`.
+* **Impersonation** ([DRILL-2363](https://issues.apache.org/jira/browse/DRILL-2363)). Queries now execute and access resources using the identity of the user who submitted the query. Previously, all queries would run as the same user (eg, `drill`). With the new impersonation capability, the query will fail if the submitting user does not have permission to read the requested file(s) in the distributed file system. To enable impersonation, set the `drill.exec.impersonation` option in `drill-override.conf`.
+* **Ownership chaining**. Drill now allows views with different owners to be chained. This represents a very flexible access control solution. For example, an administrator with access to raw, sensitive data could create a view called `masked` which would expose only a subset of the data to other users. The administrator would enable users to read the `masked` view but not the raw data. Note that Drill provides an option `max_chained_user_hops` that specifies how many ownership changed are allowed in a chain, thereby providing administrators (or data stewards) more control over sharing of data.
+* **MongoDB authentication** ([DRILL-1502](https://issues.apache.org/jira/browse/DRILL-1502)). Drill can now connect to a MongoDB cluster that requires authentication.
+* **Extended JSON datatypes**. Our friends at MongoDB invented [extended JSON](http://docs.mongodb.org/manual/reference/mongodb-extended-json/) - a set of extensions to the JSON format for supporting additional data types. We decided to embrace extended JSON in Drill. For example, standard JSON doesn't have a time type, so a time could be represented as either a string or a number: `{"foo": "19:20:30.450Z"}` is just a string. With extended JSON, the `$time` qualifier can be used to specify that `foo` is a time `{"foo": {"$time": "19:20:30.450Z"}}`.
+  We now support a number of qualifiers including `$bin`, `$date`, `$time`, `$interval`, `$numberLong` and `$dateDay` (see [the example](https://github.com/apache/drill/blob/master/exec/java-exec/src/test/resources/vector/complex/extended.json)). We're in the process of adding some additional qualifiers to make sure that all of MongoDB's extended types are supported (this is particularly important when querying data in MongoDB).
+* **Avro support** ([DRILL-1512](https://issues.apache.org/jira/browse/DRILL-1512)). Drill can now read Avro files. This patch was contributed by Andrew Selden at Elastic.co (formerly known as Elasticsearch).
+* **Improved error messages** ([DRILL-2675](https://issues.apache.org/jira/browse/DRILL-2675) and more). It can be challenging for a complex distributed system like Drill to translate low-level internal conditions into actionable messages to the user. This release includes several enhancements that enable Drill to accomplish just that in a variety of cases.
+* **Parquet and Calcite enhancements** ([DRILL-1410](https://issues.apache.org/jira/browse/DRILL-1410) and [DRILL-1384](https://issues.apache.org/jira/browse/DRILL-1384)). Drill isn't a traditional query engine - it's the first analytical query engine with a JSON data model. This has required us to enhance Parquet (our columnar format) and Calcite (our SQL parser). These enhancements have now been contributed back to those projects, and Drill is using the latest versions which include these enhancements.
+* **New sys tables for memory and thread information** ([DRILL-2275](https://issues.apache.org/jira/browse/DRILL-2275)). Drill includes two new `sys` tables that provide real-time metrics about memory utilization and threads on each of the nodes in the cluster. You can run a simple `SELECT *` to see what information is available:
+
+    ```sql
+    SELECT * FROM sys.drillmemory;
+    SELECT * FROM sys.drillbitthreads;
+    ```
+
+* **Support for very wide tables** ([DRILL-2739](https://issues.apache.org/jira/browse/DRILL-2739)). Drill previously had some issues with tables that had more than 4095 columns. This limitation has been addressed.
+
+You can now [download Drill 0.9]({{ site.baseurl }}/download/). As always, you can check out the official [release notes]({{ site.baseurl }}/docs/release-notes/) for more details.
+
+We're gearing up for Drill's 1.0 release later this month. Stay tuned!
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/2c0fb63a/_docs/rn/070-0.9.0-rn.md
----------------------------------------------------------------------
diff --git a/_docs/rn/070-0.9.0-rn.md b/_docs/rn/070-0.9.0-rn.md
deleted file mode 100755
index edae5c8..0000000
--- a/_docs/rn/070-0.9.0-rn.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: "Apache Drill 0.9.0 Release Notes"
-parent: "Release Notes"
----
-It has been about a month since the release of Drill 0.8, which included [more than 240 improvements]({{ site.baseurl }}/blog/drill-0.8-released/). Today we're happy to announce the availability of Drill 0.9, providing additional enhancements and bug fixes. In fact, this release includes [200 resolved JIRAs](https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12313820&version=12328813). Some of the noteworthy features in Drill 0.9 are:
-
-* **Authentication** ([DRILL-2674](https://issues.apache.org/jira/browse/DRILL-2674)). Drill now supports username/password authentication through the Java and C++ clients, as well as JDBC and ODBC. On the server-side, Drill leverages Linux PAM to securely validate the credentials. Users can choose to use an external user directory such as Active Directory or LDAP. To enable authentication, set the `security.user.auth` option in `drill-override.conf`.
-* **Impersonation** ([DRILL-2363](https://issues.apache.org/jira/browse/DRILL-2363)). Queries now execute and access resources using the identity of the user who submitted the query. Previously, all queries would run as the same user (eg, `drill`). With the new impersonation capability, the query will fail if the submitting user does not have permission to read the requested file(s) in the distributed file system. To enable impersonation, set the `drill.exec.impersonation` option in `drill-override.conf`.
-* **Ownership chaining**. Drill now allows views with different owners to be chained. This represents a very flexible access control solution. For example, an administrator with access to raw, sensitive data could create a view called `masked` which would expose only a subset of the data to other users. The administrator would enable users to read the `masked` view but not the raw data. Note that Drill provides an option `max_chained_user_hops` that specifies how many ownership changed are allowed in a chain, thereby providing administrators (or data stewards) more control over sharing of data.
-* **MongoDB authentication** ([DRILL-1502](https://issues.apache.org/jira/browse/DRILL-1502)). Drill can now connect to a MongoDB cluster that requires authentication.
-* **Extended JSON datatypes**. Our friends at MongoDB invented [extended JSON](http://docs.mongodb.org/manual/reference/mongodb-extended-json/) - a set of extensions to the JSON format for supporting additional data types. We decided to embrace extended JSON in Drill. For example, standard JSON doesn't have a time type, so a time could be represented as either a string or a number: `{"foo": "19:20:30.450Z"}` is just a string. With extended JSON, the `$time` qualifier can be used to specify that `foo` is a time `{"foo": {"$time": "19:20:30.450Z"}}`.
-  We now support a number of qualifiers including `$bin`, `$date`, `$time`, `$interval`, `$numberLong` and `$dateDay` (see [the example](https://github.com/apache/drill/blob/master/exec/java-exec/src/test/resources/vector/complex/extended.json)). We're in the process of adding some additional qualifiers to make sure that all of MongoDB's extended types are supported (this is particularly important when querying data in MongoDB).
-* **Avro support** ([DRILL-1512](https://issues.apache.org/jira/browse/DRILL-1512)). Drill can now read Avro files. This patch was contributed by Andrew Selden at Elastic.co (formerly known as Elasticsearch).
-* **Improved error messages** ([DRILL-2675](https://issues.apache.org/jira/browse/DRILL-2675) and more). It can be challenging for a complex distributed system like Drill to translate low-level internal conditions into actionable messages to the user. This release includes several enhancements that enable Drill to accomplish just that in a variety of cases.
-* **Parquet and Calcite enhancements** ([DRILL-1410](https://issues.apache.org/jira/browse/DRILL-1410) and [DRILL-1384](https://issues.apache.org/jira/browse/DRILL-1384)). Drill isn't a traditional query engine - it's the first analytical query engine with a JSON data model. This has required us to enhance Parquet (our columnar format) and Calcite (our SQL parser). These enhancements have now been contributed back to those projects, and Drill is using the latest versions which include these enhancements.
-* **New sys tables for memory and thread information** ([DRILL-2275](https://issues.apache.org/jira/browse/DRILL-2275)). Drill includes two new `sys` tables that provide real-time metrics about memory utilization and threads on each of the nodes in the cluster. You can run a simple `SELECT *` to see what information is available:
-
-    ```sql
-    SELECT * FROM sys.drillmemory;
-    SELECT * FROM sys.drillbitthreads;
-    ```
-
-* **Support for very wide tables** ([DRILL-2739](https://issues.apache.org/jira/browse/DRILL-2739)). Drill previously had some issues with tables that had more than 4095 colums. This limitation has been addressed.
-
-You can now [download Drill 0.9]({{ site.baseurl }}/download/). As always, you can check out the official [release notes]({{ site.baseurl }}/docs/release-notes/) for more details.
-
-We're gearing up for Drill's 1.0 release later this month. Stay tuned!
-
-

http://git-wip-us.apache.org/repos/asf/drill/blob/2c0fb63a/_docs/rn/080-1.0.0-rn.md
----------------------------------------------------------------------
diff --git a/_docs/rn/080-1.0.0-rn.md b/_docs/rn/080-1.0.0-rn.md
new file mode 100755
index 0000000..6c60b39
--- /dev/null
+++ b/_docs/rn/080-1.0.0-rn.md
@@ -0,0 +1,495 @@
+---
+title: "Apache Drill 1.0.0 Release Notes"
+parent: "Release Notes"
+---
+ Today we're happy to announce the availability of Drill 1.0.0, providing additional enhancements and bug fixes. This release includes the following new features, enhancements, and bug fixes:
+
+        Release Notes - Apache Drill - Version 1.0.0
+    
+<h2>        Sub-task
+</h2>
+<ul>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2150'>DRILL-2150</a>] -         Create an abstraction for repeated value vectors.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2358'>DRILL-2358</a>] -         Ensure DrillScanRel differentiates skip-all, scan-all &amp; scan-some in a backward compatible fashion
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2893'>DRILL-2893</a>] -         ScanBatch throws a NullPointerException instead of returning OUT_OF_MEMORY
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2895'>DRILL-2895</a>] -         AbstractRecordBatch.buildSchema() should properly handle OUT_OF_MEMORY outcome
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2902'>DRILL-2902</a>] -         Add support for context UDFs: user (and its synonyms session_user, system_user) and current_schema
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2905'>DRILL-2905</a>] -         RootExec implementations should properly handle IterOutcome.OUT_OF_MEMORY
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2920'>DRILL-2920</a>] -         properly handle OutOfMemoryException
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2947'>DRILL-2947</a>] -         AllocationHelper.allocateNew() doesn&#39;t have a consistent behavior when it can&#39;t allocate
+</li>
+</ul>
+                        
+<h2>        Bug
+</h2>
+<ul>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-148'>DRILL-148</a>] -         Remove sandbox directory from source control, it is no longer utilized
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-625'>DRILL-625</a>] -         Server does not release resources even after client connection is closed
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-708'>DRILL-708</a>] -         TRUNC(n1) function returns a decimal instead of int
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-994'>DRILL-994</a>] -         Reduce hbase timeout when it is not reachable
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1245'>DRILL-1245</a>] -         Drill should pinpoint to the &quot;Problem Record&quot; when it fails to parse a json file
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1440'>DRILL-1440</a>] -         Allow delimited files to have customizable quote characters
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1460'>DRILL-1460</a>] -         JsonReader fails reading files with decimal numbers and integers in the same field
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1502'>DRILL-1502</a>] -         Can&#39;t connect to mongo when requiring auth
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1503'>DRILL-1503</a>] -         CTAS does not work against mongo plugin
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1542'>DRILL-1542</a>] -         Early fragment termination causes non running intermediate fragments to error
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1545'>DRILL-1545</a>] -         Json files can only be read when they have a .json extension
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1727'>DRILL-1727</a>] -         REPEATED_CONTAINS sometimes doesn&#39;t work
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1827'>DRILL-1827</a>] -         Unit test framework reports expected and actual values backwards in unordered comparison
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1832'>DRILL-1832</a>] -         Select * from json file failed with java.lang.IllegalArgumentException: null
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1866'>DRILL-1866</a>] -         Tests that include limit sporadically fail when run as part of entire test suite on Linux
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1891'>DRILL-1891</a>] -         Error message does not get propagated correctly when reading from JSON file
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1973'>DRILL-1973</a>] -         Tableau query causes parsing error
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1980'>DRILL-1980</a>] -         Create table with a Cast to interval day results in a file which cannot be read
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2005'>DRILL-2005</a>] -         Create table fails to write out a parquet file created from hive- read works fine
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2006'>DRILL-2006</a>] -         Implement text reader with advanced capabilities
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2036'>DRILL-2036</a>] -         select * query returns wrong result when column name in json file changes case
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2073'>DRILL-2073</a>] -         Filter on a field in a nested repeated type throws an exception
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2074'>DRILL-2074</a>] -         Queries fail with OutOfMemory Exception when Hash Join &amp; Agg are turned off
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2085'>DRILL-2085</a>] -         Failed to propagate error
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2091'>DRILL-2091</a>] -         NPE in AbstractSqlAccessor
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2093'>DRILL-2093</a>] -         Columns of time and timestamp data type are not stored correctly in json format on CTAS
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2140'>DRILL-2140</a>] -         RPC Error querying JSON with empty nested maps
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2141'>DRILL-2141</a>] -         Data type error in group by and order by for JSON
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2158'>DRILL-2158</a>] -          Failure while attempting to start Drillbit in embedded mode. 
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2179'>DRILL-2179</a>] -         better handle column called &#39;Timestamp&#39;
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2181'>DRILL-2181</a>] -         Throw proper error message when flatten is used within an &#39;order by&#39; or &#39;group by&#39;
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2201'>DRILL-2201</a>] -         clear error message on join on complex type
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2208'>DRILL-2208</a>] -         Error message must be updated when query contains operations on a flattened column
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2219'>DRILL-2219</a>] -         Concurrent modification exception in TopLevelAllocator if a child allocator is added during loop in close()
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2221'>DRILL-2221</a>] -         CTAS (JSON) creates unreadable files when writing empty arrays
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2228'>DRILL-2228</a>] -         Projecting &#39;*&#39; returns all nulls when we have flatten in a filter and order by
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2229'>DRILL-2229</a>] -         SQL syntax errors should use SQLSyntaxErrorException
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2232'>DRILL-2232</a>] -         Flatten functionality not well defined when we use flatten in an order by without projecting it
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2264'>DRILL-2264</a>] -         Incorrect data when we use aggregate functions with flatten
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2277'>DRILL-2277</a>] -         COUNT(*) should return 0 instead of an empty result set when there are no records
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2281'>DRILL-2281</a>] -         Drill never returns when we use aggregate functions after a join with an order by
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2292'>DRILL-2292</a>] -         CTAS broken when we have repeated maps
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2301'>DRILL-2301</a>] -         Query fails when multiple table aliases are provided for CTEs
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2340'>DRILL-2340</a>] -         count(*) fails with subquery not containing limit
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2350'>DRILL-2350</a>] -         Star query failed with exception on JSON data
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2376'>DRILL-2376</a>] -         UNION ALL on Aggregates with GROUP BY returns incomplete results
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2404'>DRILL-2404</a>] -         After we cancel a query, DRILL sometimes hangs for the next query
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2408'>DRILL-2408</a>] -         CTAS should not create empty folders when underlying query returns no results
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2411'>DRILL-2411</a>] -         Scalar SUM/AVG over empty result set returns no rows instead of NULL
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2423'>DRILL-2423</a>] -         DROP VIEW against non-existent views fails with ZK error
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2425'>DRILL-2425</a>] -         Wrong results when identifier change cases within the same data file
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2437'>DRILL-2437</a>] -         enhance exception injection to support session level injections
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2452'>DRILL-2452</a>] -         ResultSet.getDouble should not throw an exception when the underlying type is a FLOAT
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2476'>DRILL-2476</a>] -         Handle IterOutcome.STOP in buildSchema()
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2506'>DRILL-2506</a>] -         IOOB with order by and limit
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2511'>DRILL-2511</a>] -         Assert with full outer join when one of the join predicates is of a required type (nullable parquet)
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2528'>DRILL-2528</a>] -         Drill-JDBC-All Jar uses outdated classes
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2532'>DRILL-2532</a>] -         Glob not always fired for DFS storage engine
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2533'>DRILL-2533</a>] -         Metrics displayed in the profile UI should be rounded off instead of being truncated
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2535'>DRILL-2535</a>] -         Column labels on drill profile page are incorrect
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2536'>DRILL-2536</a>] -         Peak Mem column in the profile page displays 0 when value is less than 1MB
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2545'>DRILL-2545</a>] -         Killing a JDBC client program does not kill the query on drillbits
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2548'>DRILL-2548</a>] -         JDBC driver prints misleading SQL exception on getting record batches with no data
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2552'>DRILL-2552</a>] -         ZK disconnect to foreman node results in hung query on client
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2554'>DRILL-2554</a>] -         Incorrect results for repeated values when using jdbc
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2569'>DRILL-2569</a>] -         Minor fragmentId in Profile UI gets truncated to the last 2 digits
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2570'>DRILL-2570</a>] -         Broken JDBC-All Jar packaging can cause missing XML classes
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2589'>DRILL-2589</a>] -         Creating a view with duplicate column names should fail or give a warning to the user
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2598'>DRILL-2598</a>] -         Order by with limit on complex type throw IllegalStateException
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2617'>DRILL-2617</a>] -         Errors in the execution stack will cause DeferredException to throw an IllegalStateException
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2624'>DRILL-2624</a>] -         org.apache.drill.common.StackTrace prints garbage for line numbers
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2662'>DRILL-2662</a>] -         Exception type not being included when propagating exception message
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2723'>DRILL-2723</a>] -         Inaccurate row count estimate for text files results in BroadcastExchange
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2750'>DRILL-2750</a>] -         Running 1 or more queries against Drillbits having insufficient DirectMem renders the Drillbits in an unusable state
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2753'>DRILL-2753</a>] -         Implicit cast fails when comparing a double column and a varchar literal
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2754'>DRILL-2754</a>] -         Allocation bug in splitAndTransfer method causing some flatten queries to fail
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2755'>DRILL-2755</a>] -         Use and handle InterruptedException during query processing
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2757'>DRILL-2757</a>] -         Verify operators correctly handle low memory conditions and cancellations
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2776'>DRILL-2776</a>] -         querying a .json file that contains a repeated type returns the wrong results 
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2778'>DRILL-2778</a>] -         Killing the drillbit which is the foreman results in hung sqlline
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2780'>DRILL-2780</a>] -         java.lang.IllegalStateException files open exceptions in drillbit.out
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2793'>DRILL-2793</a>] -         Killing a non foreman node results in direct memory being held on
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2801'>DRILL-2801</a>] -         ORDER BY produces extra records
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2806'>DRILL-2806</a>] -         Querying data from compressed csv file returns nulls and unreadable data
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2809'>DRILL-2809</a>] -         Increase the default value of partitioner_sender_threads_factor
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2811'>DRILL-2811</a>] -         Need option to specify Drillbit in the connection URI to connect to that specific Drillbit
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2816'>DRILL-2816</a>] -         system error does not display the original Exception message
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2823'>DRILL-2823</a>] -         Merge join should use implicit cast
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2824'>DRILL-2824</a>] -         Function resolution should be deterministic
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2826'>DRILL-2826</a>] -         Improve resilience to memory leaks and unclosed allocators
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2841'>DRILL-2841</a>] -         Web UI very slow in a multi node machine
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2847'>DRILL-2847</a>] -         DrillBufs from the RPC layer are being leaked
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2848'>DRILL-2848</a>] -         Disable decimal data type by default
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2849'>DRILL-2849</a>] -         Difference in query results over CSV file created by CTAS, compared to results over original CSV file 
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2865'>DRILL-2865</a>] -         Drillbit runs out of memory on multiple consecutive CTAS
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2870'>DRILL-2870</a>] -         Fix return type of aggregate functions to be nullable
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2871'>DRILL-2871</a>] -         Plan for TPC-H 20 changed with DRILL-1384 (or DRILL-2761) causing performance degradation
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2872'>DRILL-2872</a>] -         Result from json file returns data from map type fields as &quot;null&quot;
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2875'>DRILL-2875</a>] -         IllegalStateException when querying the public yelp json dataset
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2878'>DRILL-2878</a>] -         FragmentExecutor.closeOutResources() is not called if an exception happens in the Foreman before the fragment executor starts running
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2884'>DRILL-2884</a>] -         Have cancel() cause &quot;query canceled&quot; rather than just &quot;ResultSet closed&quot;
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2886'>DRILL-2886</a>] -         JDBC driver doesn&#39;t detect lost connection
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2887'>DRILL-2887</a>] -         Fix bad applications of JdbcTest.connect()
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2889'>DRILL-2889</a>] -         Rename JdbcTest to JdbcTestBase.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2894'>DRILL-2894</a>] -         FixedValueVectors shouldn&#39;t set it&#39;s data buffer to null when it fails to allocate it
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2897'>DRILL-2897</a>] -         Update Limit 0 to avoid parallelization
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2904'>DRILL-2904</a>] -         Fix wrong &quot;before rows&quot; message to &quot;after rows&quot; message
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2907'>DRILL-2907</a>] -         Drill performance degrades significantly over time - resource leak
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2914'>DRILL-2914</a>] -         regression: Mondrian query534.q, drill give wrong result
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2921'>DRILL-2921</a>] -         Query with a mix of distinct and not distinct scalar aggregates runs out of memory
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2927'>DRILL-2927</a>] -         Pending query in resource queue starts after timeout
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2928'>DRILL-2928</a>] -         C++ Client - io_service needs to be reset if it runs out of work
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2932'>DRILL-2932</a>] -         Error text reported via System.out.println rather than thrown SQLException&#39;s message
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2934'>DRILL-2934</a>] -         Exception when distinct aggregate is compared to numeric literal with decimal point 
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2936'>DRILL-2936</a>] -         TPCH 4 and 18 SF100 hangs when hash agg is turned off
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2940'>DRILL-2940</a>] -         Large allocations are not released until GC kicks in
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2942'>DRILL-2942</a>] -         Allow Use of epoll RPC layer on Linux
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2943'>DRILL-2943</a>] -         Drill parsing error during deserialization for an Order-By
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2944'>DRILL-2944</a>] -         Switch to G1GC to reduce GC cpu overhead.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2951'>DRILL-2951</a>] -         Tables are not visible when Drillbit is specified in the connection URL
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2953'>DRILL-2953</a>] -         Group By + Order By query results are not ordered.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2957'>DRILL-2957</a>] -         Netty Memory Manager doesn&#39;t move empty chunks between lists
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2959'>DRILL-2959</a>] -         Compression codecs are leaking or slow to recapture memory
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2960'>DRILL-2960</a>] -         Default hive storage plugin missing from fresh drill install
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2961'>DRILL-2961</a>] -         Statement.setQueryTimeout() should throw a SQLException
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2962'>DRILL-2962</a>] -         Correlated subquery with scalar aggregate or scalar aggregate with expression throws an error
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2963'>DRILL-2963</a>] -         Exists with empty left batch causes IllegalStateException
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2966'>DRILL-2966</a>] -         HAVING clause with CASE statement with IN predicate causes assertion
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2968'>DRILL-2968</a>] -         crash on parquet file
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2971'>DRILL-2971</a>] -         If Bit&lt;&gt;Bit connection is unexpectedly closed and we were already blocked on writing to socket, we&#39;ll stay forever in ResettableBarrier.await()
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2973'>DRILL-2973</a>] -         Error messages not showing up in sqlline
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2976'>DRILL-2976</a>] -         Set default of extended JSON support for output to false until issues are resolved
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2977'>DRILL-2977</a>] -         In WorkManager, startFragmentPendingRemote() and addFragmentRunner() need to be permuted
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2978'>DRILL-2978</a>] -         FragmentManager is not removed from the WorkEventBus if it&#39;s FragmentExecutor is cancelled before it starts running
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2979'>DRILL-2979</a>] -         Storage HBase doesn&#39;t support customized hbase property zookeeper.znode.parent
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2989'>DRILL-2989</a>] -         TPCDS Query corrupts Drillbits and causing subsequent unrelated queries to hang (and timeout)
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2993'>DRILL-2993</a>] -         SQLLine hangs when we cancel a query in the middle of displaying results
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2994'>DRILL-2994</a>] -         Incorrect error message when disconnecting from server (using direct connection to drillbit)
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2998'>DRILL-2998</a>] -         Update C++ client to send/receive heartbeat message 
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3000'>DRILL-3000</a>] -         I got JIRA report #3000.  Now ... to use it for good or evil?
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3001'>DRILL-3001</a>] -         Some functional tests fail when new text reader is disabled
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3005'>DRILL-3005</a>] -         Spurious Error messages when using PrintingResultsListener
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3006'>DRILL-3006</a>] -         CTAS with interval data type creates invalid parquet file
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3007'>DRILL-3007</a>] -         Update Drill configuration settings to avoid mmap threshold increases on Linux
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3009'>DRILL-3009</a>] -         Reduce the IN list threshold to take advantage of Values operator
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3010'>DRILL-3010</a>] -         Convert bad command error messages into UserExceptions in SqlHandlers
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3012'>DRILL-3012</a>] -         Values Operator doesn&#39;t propagate operator id
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3017'>DRILL-3017</a>] -         NPE when cleaning up some RecordReader implementations
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3018'>DRILL-3018</a>] -         Queries with scalar aggregate  and non equality (non correlated) fail to plan
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3020'>DRILL-3020</a>] -         Some exception message text not displayed in SQLLine, etc.; copy to thrown SQLException&#39;s message
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3022'>DRILL-3022</a>] -         Ensure sequential shutdown of Drillbits
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3033'>DRILL-3033</a>] -         Add memory leak fixes found so far in DRILL-1942 to 1.0
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3037'>DRILL-3037</a>] -         Unable to query on hdfs after moving to 0.9.0 version
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3046'>DRILL-3046</a>] -         Memory Leak after cancelling a query
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3047'>DRILL-3047</a>] -         Command failed while establishing connection
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3048'>DRILL-3048</a>] -         Disable assertions by default
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3049'>DRILL-3049</a>] -         Increase sort spooling threshold
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3050'>DRILL-3050</a>] -         Increase query context max memory
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3051'>DRILL-3051</a>] -         Integer overflow in TimedRunnable
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3052'>DRILL-3052</a>] -         canceling a fragment executor before it starts running will cause the Foreman to wait indefinitely for a terminal message from that fragment
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3057'>DRILL-3057</a>] -         A query that used to work before now fails in the optimizer
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3058'>DRILL-3058</a>] -         RemoteConnection of RPC double closes the connection
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3061'>DRILL-3061</a>] -         Fix memory leaks in TestDrillbitResilience
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3062'>DRILL-3062</a>] -         regression: Mondrian query447.q - lots of rows missing in result set
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3063'>DRILL-3063</a>] -         TestQueriesOnLargeFile leaks memory with 16M limit
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3065'>DRILL-3065</a>] -         Memory Leak at ExternalSortBatch
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3066'>DRILL-3066</a>] -         AtomicRemainder - Tried to close remainder, but it has already been closed.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3069'>DRILL-3069</a>] -         Wrong result for aggregate query with filter  on SF100 
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3070'>DRILL-3070</a>] -         Memory Leak when we run out of memory
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3071'>DRILL-3071</a>] -         RecordBatchLoader#load leaks memory if an exception is thrown while loading the batch.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3072'>DRILL-3072</a>] -         Profile UI fails to load when there is an empty json profile
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3074'>DRILL-3074</a>] -         ReconnectingClient.waitAndRun can stuck in infinite loop if it fails to establish the connection
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3077'>DRILL-3077</a>] -         sqlline&#39;s return code is 0 even when it force exits due to failed sql command
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3079'>DRILL-3079</a>] -         Move JSON Execution Plan parsing to FragmentExecutor
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3080'>DRILL-3080</a>] -         Error message is invalid if workload queue times out
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3081'>DRILL-3081</a>] -         Fix situation where Drill reports null &lt;--&gt; null in connection error
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3085'>DRILL-3085</a>] -         In ExternalSortBatch, Memory Leak in Runtime Generation Code
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3087'>DRILL-3087</a>] -         Union All returns incorrect results.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3088'>DRILL-3088</a>] -         IllegalStateException: Cleanup before finished. 0 out of 1 streams have finished
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3089'>DRILL-3089</a>] -         Revert to 4 forked test and allow override from command line
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3092'>DRILL-3092</a>] -         Memory leak when an allocation fails near the creation of a RecordBatchData object
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3093'>DRILL-3093</a>] -         Leaking RawBatchBuffer
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3098'>DRILL-3098</a>] -         Set Unix style &quot;line.separator&quot; for tests
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3099'>DRILL-3099</a>] -         FileSelection&#39;s selectionRoot does not include the scheme and authority
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3100'>DRILL-3100</a>] -         TestImpersonationDisabledWithMiniDFS fails on Windows
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3101'>DRILL-3101</a>] -         Setting &quot;slice_target&quot; to 1 changes the order of the columns in a &quot;select *&quot; query with order by
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3103'>DRILL-3103</a>] -         EncoderException: RpcEncoder must produce at least one message.
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3105'>DRILL-3105</a>] -         OutOfMemoryError: GC overhead limit exceeded
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3107'>DRILL-3107</a>] -         Dynamic partition pruning fails on Windows (TestDirectoryExplorerUDFs)
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3109'>DRILL-3109</a>] -         Cancellation from sqlline is broken with the updated version
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3110'>DRILL-3110</a>] -         OutOfMemoryError causes memory accounting leak 
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3112'>DRILL-3112</a>] -         Drill UI profile page shows exceptions where a long running query is submitted via the UI
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3114'>DRILL-3114</a>] -         Sqlline throws exception at launch
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3115'>DRILL-3115</a>] -         SQLLine colors do not work well with CYGWIN
+</li>
+</ul>
+                    
+<h2>        Improvement
+</h2>
+<ul>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1662'>DRILL-1662</a>] -         drillbit.sh stop should timeout
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2433'>DRILL-2433</a>] -         Implicit cast between date and timestamp is missing in joins
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2508'>DRILL-2508</a>] -         Add new column to sys.options table that exposes whether or not the current system value is the default
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2602'>DRILL-2602</a>] -         Throw an error on schema change during streaming aggregation
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2697'>DRILL-2697</a>] -         Pause injections should pause indefinitely until signalled
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2725'>DRILL-2725</a>] -         Faster work assignment logic
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2772'>DRILL-2772</a>] -         Display status of query when viewing the query&#39;s profile page
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2946'>DRILL-2946</a>] -         Tableau 9.0 Desktop Enablement Document
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2955'>DRILL-2955</a>] -         Enable color in sqlline for exceptions
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2969'>DRILL-2969</a>] -         Readers don&#39;t report number of records read in profile
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2981'>DRILL-2981</a>] -         Add simplified activity log
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2982'>DRILL-2982</a>] -         Tableau 9.0 Server Enablement Documentation
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2984'>DRILL-2984</a>] -         UserException is logging through its parent class logger
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3027'>DRILL-3027</a>] -         Add convenience methods to test builder for creating nested baseline values
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3053'>DRILL-3053</a>] -         add unchecked exception injection site in ChildAllocator
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-3084'>DRILL-3084</a>] -         Add drill-* convenience methods for common cli startup commands
+</li>
+</ul>
+            
+<h2>        New Feature
+</h2>
+<ul>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-1573'>DRILL-1573</a>] -         Add configuration to skip header row in text files
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2382'>DRILL-2382</a>] -         enhance exception injection to support node-specific injections
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2383'>DRILL-2383</a>] -         add exception and pause injections for testing drillbit stability
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2658'>DRILL-2658</a>] -         Add ilike and regex substring functions
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2958'>DRILL-2958</a>] -         Move Drill to alternative cost-based planner for Join planning
+</li>
+</ul>
+                                                    
+<h2>        Task
+</h2>
+<ul>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2316'>DRILL-2316</a>] -         Docs Enhancement: Data Sources and File Formats, Basics Tutorial
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2336'>DRILL-2336</a>] -         configuration storage plugin docs update
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2364'>DRILL-2364</a>] -         JSON Data Model Reference 2nd draft
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2381'>DRILL-2381</a>] -         write lexical structures section, JSON/Parquet reference fixes, updates to data types
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2397'>DRILL-2397</a>] -         Enhance SQL Ref Data Types docs
+</li>
+<li>[<a href='https://issues.apache.org/jira/browse/DRILL-2736'>DRILL-2736</a>] -         review feedback on multitenancy and user auth
+</li>
+</ul>
+
+You can now [download Drill 1.0.0]({{ site.baseurl }}/download/).
+
+
+


[08/14] drill git commit: perf tuning ref

Posted by ts...@apache.org.
perf tuning ref


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/e643acbf
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/e643acbf
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/e643acbf

Branch: refs/heads/gh-pages
Commit: e643acbf2d2023d8612d2d5e7e36e219ac49d096
Parents: 8f7dcfa
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 15:56:48 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 15:56:48 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 413 +++++++++++++------
 .../060-performance-tuning-reference.md         |   4 +
 .../060-query-profile-tables.md                 |  84 ----
 .../010-query-profile-tables.md                 |  84 ++++
 .../020-physical-operators.md                   | 115 ++++++
 .../sql-functions/073-log-and-debug.md          |   3 -
 6 files changed, 489 insertions(+), 214 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/e643acbf/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 76ac0f9..b9c3f83 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -3239,8 +3239,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Query Profile Column Descriptions", 
-                    "next_url": "/docs/query-profile-column-descriptions/", 
+                    "next_title": "Performance Tuning Reference", 
+                    "next_url": "/docs/performance-tuning-reference/", 
                     "parent": "Identifying Performance Issues", 
                     "previous_title": "Query Plans", 
                     "previous_url": "/docs/query-plans/", 
@@ -4243,8 +4243,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "", 
-                    "next_url": "", 
+                    "next_title": "Query Audit Logging", 
+                    "next_url": "/docs/query-audit-logging/", 
                     "parent": "Log and Debug", 
                     "previous_title": "Modify logback.xml", 
                     "previous_url": "/docs/modify-logback-xml/", 
@@ -4256,9 +4256,9 @@
             "next_title": "Log and Debug Introduction", 
             "next_url": "/docs/log-and-debug-introduction/", 
             "parent": "", 
-            "previous_title": "Project Bylaws", 
-            "previous_url": "/docs/project-bylaws/", 
-            "relative_path": "_docs/sql-reference/sql-functions/073-log-and-debug.md", 
+            "previous_title": "Physical Operators", 
+            "previous_url": "/docs/physical-operators/", 
+            "relative_path": "_docs/073-log-and-debug.md", 
             "title": "Log and Debug", 
             "url": "/docs/log-and-debug/"
         }, 
@@ -5323,8 +5323,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Query Profile Column Descriptions", 
-                            "next_url": "/docs/query-profile-column-descriptions/", 
+                            "next_title": "Performance Tuning Reference", 
+                            "next_url": "/docs/performance-tuning-reference/", 
                             "parent": "Identifying Performance Issues", 
                             "previous_title": "Query Plans", 
                             "previous_url": "/docs/query-plans/", 
@@ -5349,15 +5349,58 @@
                             "url": "/docs/performance-tuning/"
                         }
                     ], 
-                    "children": [], 
-                    "next_title": "Log and Debug", 
-                    "next_url": "/docs/log-and-debug/", 
+                    "children": [
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Performance Tuning Reference", 
+                                    "url": "/docs/performance-tuning-reference/"
+                                }, 
+                                {
+                                    "title": "Performance Tuning", 
+                                    "url": "/docs/performance-tuning/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Physical Operators", 
+                            "next_url": "/docs/physical-operators/", 
+                            "parent": "Performance Tuning Reference", 
+                            "previous_title": "Performance Tuning Reference", 
+                            "previous_url": "/docs/performance-tuning-reference/", 
+                            "relative_path": "_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md", 
+                            "title": "Query Profile Column Descriptions", 
+                            "url": "/docs/query-profile-column-descriptions/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Performance Tuning Reference", 
+                                    "url": "/docs/performance-tuning-reference/"
+                                }, 
+                                {
+                                    "title": "Performance Tuning", 
+                                    "url": "/docs/performance-tuning/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Log and Debug", 
+                            "next_url": "/docs/log-and-debug/", 
+                            "parent": "Performance Tuning Reference", 
+                            "previous_title": "Query Profile Column Descriptions", 
+                            "previous_url": "/docs/query-profile-column-descriptions/", 
+                            "relative_path": "_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md", 
+                            "title": "Physical Operators", 
+                            "url": "/docs/physical-operators/"
+                        }
+                    ], 
+                    "next_title": "Query Profile Column Descriptions", 
+                    "next_url": "/docs/query-profile-column-descriptions/", 
                     "parent": "Performance Tuning", 
                     "previous_title": "Query Profiles", 
                     "previous_url": "/docs/query-profiles/", 
-                    "relative_path": "_docs/performance-tuning/060-query-profile-tables.md", 
-                    "title": "Query Profile Column Descriptions", 
-                    "url": "/docs/query-profile-column-descriptions/"
+                    "relative_path": "_docs/performance-tuning/060-performance-tuning-reference.md", 
+                    "title": "Performance Tuning Reference", 
+                    "url": "/docs/performance-tuning-reference/"
                 }
             ], 
             "next_title": "Performance Tuning Introduction", 
@@ -5386,6 +5429,66 @@
             "title": "Performance Tuning Introduction", 
             "url": "/docs/performance-tuning-introduction/"
         }, 
+        "Performance Tuning Reference": {
+            "breadcrumbs": [
+                {
+                    "title": "Performance Tuning", 
+                    "url": "/docs/performance-tuning/"
+                }
+            ], 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Performance Tuning Reference", 
+                            "url": "/docs/performance-tuning-reference/"
+                        }, 
+                        {
+                            "title": "Performance Tuning", 
+                            "url": "/docs/performance-tuning/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Physical Operators", 
+                    "next_url": "/docs/physical-operators/", 
+                    "parent": "Performance Tuning Reference", 
+                    "previous_title": "Performance Tuning Reference", 
+                    "previous_url": "/docs/performance-tuning-reference/", 
+                    "relative_path": "_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md", 
+                    "title": "Query Profile Column Descriptions", 
+                    "url": "/docs/query-profile-column-descriptions/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Performance Tuning Reference", 
+                            "url": "/docs/performance-tuning-reference/"
+                        }, 
+                        {
+                            "title": "Performance Tuning", 
+                            "url": "/docs/performance-tuning/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Log and Debug", 
+                    "next_url": "/docs/log-and-debug/", 
+                    "parent": "Performance Tuning Reference", 
+                    "previous_title": "Query Profile Column Descriptions", 
+                    "previous_url": "/docs/query-profile-column-descriptions/", 
+                    "relative_path": "_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md", 
+                    "title": "Physical Operators", 
+                    "url": "/docs/physical-operators/"
+                }
+            ], 
+            "next_title": "Query Profile Column Descriptions", 
+            "next_url": "/docs/query-profile-column-descriptions/", 
+            "parent": "Performance Tuning", 
+            "previous_title": "Query Profiles", 
+            "previous_url": "/docs/query-profiles/", 
+            "relative_path": "_docs/performance-tuning/060-performance-tuning-reference.md", 
+            "title": "Performance Tuning Reference", 
+            "url": "/docs/performance-tuning-reference/"
+        }, 
         "Persistent Configuration Storage": {
             "breadcrumbs": [
                 {
@@ -5407,6 +5510,27 @@
             "title": "Persistent Configuration Storage", 
             "url": "/docs/persistent-configuration-storage/"
         }, 
+        "Physical Operators": {
+            "breadcrumbs": [
+                {
+                    "title": "Performance Tuning Reference", 
+                    "url": "/docs/performance-tuning-reference/"
+                }, 
+                {
+                    "title": "Performance Tuning", 
+                    "url": "/docs/performance-tuning/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Log and Debug", 
+            "next_url": "/docs/log-and-debug/", 
+            "parent": "Performance Tuning Reference", 
+            "previous_title": "Query Profile Column Descriptions", 
+            "previous_url": "/docs/query-profile-column-descriptions/", 
+            "relative_path": "_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md", 
+            "title": "Physical Operators", 
+            "url": "/docs/physical-operators/"
+        }, 
         "Planning and Execution Options": {
             "breadcrumbs": [
                 {
@@ -5499,8 +5623,8 @@
         "Project Bylaws": {
             "breadcrumbs": [], 
             "children": [], 
-            "next_title": "Log and Debug", 
-            "next_url": "/docs/log-and-debug/", 
+            "next_title": "", 
+            "next_url": "", 
             "parent": "", 
             "previous_title": "2014 Q1 Drill Report", 
             "previous_url": "/docs/2014-q1-drill-report/", 
@@ -5532,8 +5656,8 @@
             "next_title": "Getting Query Information", 
             "next_url": "/docs/getting-query-information/", 
             "parent": "", 
-            "previous_title": "Log and Debug", 
-            "previous_url": "/docs/log-and-debug/", 
+            "previous_title": "Review the Java Stack Trace", 
+            "previous_url": "/docs/review-the-java-stack-trace/", 
             "relative_path": "_docs/074-query-audit-logging.md", 
             "title": "Query Audit Logging", 
             "url": "/docs/query-audit-logging/"
@@ -6164,17 +6288,21 @@
         "Query Profile Column Descriptions": {
             "breadcrumbs": [
                 {
+                    "title": "Performance Tuning Reference", 
+                    "url": "/docs/performance-tuning-reference/"
+                }, 
+                {
                     "title": "Performance Tuning", 
                     "url": "/docs/performance-tuning/"
                 }
             ], 
             "children": [], 
-            "next_title": "Log and Debug", 
-            "next_url": "/docs/log-and-debug/", 
-            "parent": "Performance Tuning", 
-            "previous_title": "Query Profiles", 
-            "previous_url": "/docs/query-profiles/", 
-            "relative_path": "_docs/performance-tuning/060-query-profile-tables.md", 
+            "next_title": "Physical Operators", 
+            "next_url": "/docs/physical-operators/", 
+            "parent": "Performance Tuning Reference", 
+            "previous_title": "Performance Tuning Reference", 
+            "previous_url": "/docs/performance-tuning-reference/", 
+            "relative_path": "_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md", 
             "title": "Query Profile Column Descriptions", 
             "url": "/docs/query-profile-column-descriptions/"
         }, 
@@ -6190,8 +6318,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Query Profile Column Descriptions", 
-            "next_url": "/docs/query-profile-column-descriptions/", 
+            "next_title": "Performance Tuning Reference", 
+            "next_url": "/docs/performance-tuning-reference/", 
             "parent": "Identifying Performance Issues", 
             "previous_title": "Query Plans", 
             "previous_url": "/docs/query-plans/", 
@@ -6935,8 +7063,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "", 
-            "next_url": "", 
+            "next_title": "Query Audit Logging", 
+            "next_url": "/docs/query-audit-logging/", 
             "parent": "Log and Debug", 
             "previous_title": "Modify logback.xml", 
             "previous_url": "/docs/modify-logback-xml/", 
@@ -12612,8 +12740,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Query Profile Column Descriptions", 
-                            "next_url": "/docs/query-profile-column-descriptions/", 
+                            "next_title": "Performance Tuning Reference", 
+                            "next_url": "/docs/performance-tuning-reference/", 
                             "parent": "Identifying Performance Issues", 
                             "previous_title": "Query Plans", 
                             "previous_url": "/docs/query-plans/", 
@@ -12638,15 +12766,58 @@
                             "url": "/docs/performance-tuning/"
                         }
                     ], 
-                    "children": [], 
-                    "next_title": "Log and Debug", 
-                    "next_url": "/docs/log-and-debug/", 
+                    "children": [
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Performance Tuning Reference", 
+                                    "url": "/docs/performance-tuning-reference/"
+                                }, 
+                                {
+                                    "title": "Performance Tuning", 
+                                    "url": "/docs/performance-tuning/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Physical Operators", 
+                            "next_url": "/docs/physical-operators/", 
+                            "parent": "Performance Tuning Reference", 
+                            "previous_title": "Performance Tuning Reference", 
+                            "previous_url": "/docs/performance-tuning-reference/", 
+                            "relative_path": "_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md", 
+                            "title": "Query Profile Column Descriptions", 
+                            "url": "/docs/query-profile-column-descriptions/"
+                        }, 
+                        {
+                            "breadcrumbs": [
+                                {
+                                    "title": "Performance Tuning Reference", 
+                                    "url": "/docs/performance-tuning-reference/"
+                                }, 
+                                {
+                                    "title": "Performance Tuning", 
+                                    "url": "/docs/performance-tuning/"
+                                }
+                            ], 
+                            "children": [], 
+                            "next_title": "Log and Debug", 
+                            "next_url": "/docs/log-and-debug/", 
+                            "parent": "Performance Tuning Reference", 
+                            "previous_title": "Query Profile Column Descriptions", 
+                            "previous_url": "/docs/query-profile-column-descriptions/", 
+                            "relative_path": "_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md", 
+                            "title": "Physical Operators", 
+                            "url": "/docs/physical-operators/"
+                        }
+                    ], 
+                    "next_title": "Query Profile Column Descriptions", 
+                    "next_url": "/docs/query-profile-column-descriptions/", 
                     "parent": "Performance Tuning", 
                     "previous_title": "Query Profiles", 
                     "previous_url": "/docs/query-profiles/", 
-                    "relative_path": "_docs/performance-tuning/060-query-profile-tables.md", 
-                    "title": "Query Profile Column Descriptions", 
-                    "url": "/docs/query-profile-column-descriptions/"
+                    "relative_path": "_docs/performance-tuning/060-performance-tuning-reference.md", 
+                    "title": "Performance Tuning Reference", 
+                    "url": "/docs/performance-tuning-reference/"
                 }
             ], 
             "next_title": "Performance Tuning Introduction", 
@@ -12660,12 +12831,81 @@
         }, 
         {
             "breadcrumbs": [], 
-            "children": [], 
-            "next_title": "Query Audit Logging", 
-            "next_url": "/docs/query-audit-logging/", 
+            "children": [
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Error Messages", 
+                    "next_url": "/docs/error-messages/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Log and Debug", 
+                    "previous_url": "/docs/log-and-debug/", 
+                    "relative_path": "_docs/log-and-debug/001-log-and-debug-introduction.md", 
+                    "title": "Log and Debug Introduction", 
+                    "url": "/docs/log-and-debug-introduction/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Modify logback.xml", 
+                    "next_url": "/docs/modify-logback-xml/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Log and Debug Introduction", 
+                    "previous_url": "/docs/log-and-debug-introduction/", 
+                    "relative_path": "_docs/log-and-debug/002-error-messages.md", 
+                    "title": "Error Messages", 
+                    "url": "/docs/error-messages/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Review the Java Stack Trace", 
+                    "next_url": "/docs/review-the-java-stack-trace/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Error Messages", 
+                    "previous_url": "/docs/error-messages/", 
+                    "relative_path": "_docs/log-and-debug/003-modify-logback.xml.md", 
+                    "title": "Modify logback.xml", 
+                    "url": "/docs/modify-logback-xml/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Log and Debug", 
+                            "url": "/docs/log-and-debug/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Query Audit Logging", 
+                    "next_url": "/docs/query-audit-logging/", 
+                    "parent": "Log and Debug", 
+                    "previous_title": "Modify logback.xml", 
+                    "previous_url": "/docs/modify-logback-xml/", 
+                    "relative_path": "_docs/log-and-debug/004-review-the-java-stack-trace.md", 
+                    "title": "Review the Java Stack Trace", 
+                    "url": "/docs/review-the-java-stack-trace/"
+                }
+            ], 
+            "next_title": "Log and Debug Introduction", 
+            "next_url": "/docs/log-and-debug-introduction/", 
             "parent": "", 
-            "previous_title": "Query Profile Column Descriptions", 
-            "previous_url": "/docs/query-profile-column-descriptions/", 
+            "previous_title": "Physical Operators", 
+            "previous_url": "/docs/physical-operators/", 
             "relative_path": "_docs/073-log-and-debug.md", 
             "title": "Log and Debug", 
             "url": "/docs/log-and-debug/"
@@ -12694,8 +12934,8 @@
             "next_title": "Getting Query Information", 
             "next_url": "/docs/getting-query-information/", 
             "parent": "", 
-            "previous_title": "Log and Debug", 
-            "previous_url": "/docs/log-and-debug/", 
+            "previous_title": "Review the Java Stack Trace", 
+            "previous_url": "/docs/review-the-java-stack-trace/", 
             "relative_path": "_docs/074-query-audit-logging.md", 
             "title": "Query Audit Logging", 
             "url": "/docs/query-audit-logging/"
@@ -14511,95 +14751,14 @@
         {
             "breadcrumbs": [], 
             "children": [], 
-            "next_title": "Log and Debug", 
-            "next_url": "/docs/log-and-debug/", 
+            "next_title": "", 
+            "next_url": "", 
             "parent": "", 
             "previous_title": "2014 Q1 Drill Report", 
             "previous_url": "/docs/2014-q1-drill-report/", 
             "relative_path": "_docs/170-bylaws.md", 
             "title": "Project Bylaws", 
             "url": "/docs/project-bylaws/"
-        }, 
-        {
-            "breadcrumbs": [], 
-            "children": [
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Log and Debug", 
-                            "url": "/docs/log-and-debug/"
-                        }
-                    ], 
-                    "children": [], 
-                    "next_title": "Error Messages", 
-                    "next_url": "/docs/error-messages/", 
-                    "parent": "Log and Debug", 
-                    "previous_title": "Log and Debug", 
-                    "previous_url": "/docs/log-and-debug/", 
-                    "relative_path": "_docs/log-and-debug/001-log-and-debug-introduction.md", 
-                    "title": "Log and Debug Introduction", 
-                    "url": "/docs/log-and-debug-introduction/"
-                }, 
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Log and Debug", 
-                            "url": "/docs/log-and-debug/"
-                        }
-                    ], 
-                    "children": [], 
-                    "next_title": "Modify logback.xml", 
-                    "next_url": "/docs/modify-logback-xml/", 
-                    "parent": "Log and Debug", 
-                    "previous_title": "Log and Debug Introduction", 
-                    "previous_url": "/docs/log-and-debug-introduction/", 
-                    "relative_path": "_docs/log-and-debug/002-error-messages.md", 
-                    "title": "Error Messages", 
-                    "url": "/docs/error-messages/"
-                }, 
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Log and Debug", 
-                            "url": "/docs/log-and-debug/"
-                        }
-                    ], 
-                    "children": [], 
-                    "next_title": "Review the Java Stack Trace", 
-                    "next_url": "/docs/review-the-java-stack-trace/", 
-                    "parent": "Log and Debug", 
-                    "previous_title": "Error Messages", 
-                    "previous_url": "/docs/error-messages/", 
-                    "relative_path": "_docs/log-and-debug/003-modify-logback.xml.md", 
-                    "title": "Modify logback.xml", 
-                    "url": "/docs/modify-logback-xml/"
-                }, 
-                {
-                    "breadcrumbs": [
-                        {
-                            "title": "Log and Debug", 
-                            "url": "/docs/log-and-debug/"
-                        }
-                    ], 
-                    "children": [], 
-                    "next_title": "", 
-                    "next_url": "", 
-                    "parent": "Log and Debug", 
-                    "previous_title": "Modify logback.xml", 
-                    "previous_url": "/docs/modify-logback-xml/", 
-                    "relative_path": "_docs/log-and-debug/004-review-the-java-stack-trace.md", 
-                    "title": "Review the Java Stack Trace", 
-                    "url": "/docs/review-the-java-stack-trace/"
-                }
-            ], 
-            "next_title": "Log and Debug Introduction", 
-            "next_url": "/docs/log-and-debug-introduction/", 
-            "parent": "", 
-            "previous_title": "Project Bylaws", 
-            "previous_url": "/docs/project-bylaws/", 
-            "relative_path": "_docs/sql-reference/sql-functions/073-log-and-debug.md", 
-            "title": "Log and Debug", 
-            "url": "/docs/log-and-debug/"
         }
     ]
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/e643acbf/_docs/performance-tuning/060-performance-tuning-reference.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/060-performance-tuning-reference.md b/_docs/performance-tuning/060-performance-tuning-reference.md
new file mode 100755
index 0000000..487ffa8
--- /dev/null
+++ b/_docs/performance-tuning/060-performance-tuning-reference.md
@@ -0,0 +1,4 @@
+---
+title: "Performance Tuning Reference"
+parent: "Performance Tuning"
+--- 

http://git-wip-us.apache.org/repos/asf/drill/blob/e643acbf/_docs/performance-tuning/060-query-profile-tables.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/060-query-profile-tables.md b/_docs/performance-tuning/060-query-profile-tables.md
deleted file mode 100644
index 009516c..0000000
--- a/_docs/performance-tuning/060-query-profile-tables.md
+++ /dev/null
@@ -1,84 +0,0 @@
----
-title: "Query Profile Column Descriptions"
-parent: "Performance Tuning"
---- 
-
-The following tables provide descriptions listed in each of the tables for a query profile.  
-
-
-## Fragment Overview  Table  
-
-Shows aggregate metrics for each major fragment that executed the query.
-
-The following table lists descriptions for each column in the Fragment Overview  
-table:  
-
-| Column Name               | Description                                                                                                                                                                 |
-|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Major Fragment ID         | The coordinate ID of the major fragment. For example, 03-xx-xx where 03 is the major fragment ID followed by xx-xx, which represents the minor fragment ID and operator ID. |
-| Minor Fragments Reporting | The number of minor fragments that Drill parallelized for the major fragment.                                                                                               |
-| First Start               | The total time before the first minor fragment started its task.                                                                                                            |
-| Last Start                | The total time before the last minor fragment started its task.                                                                                                             |
-| First End                 | The total time for the first minor fragment to finish its task.                                                                                                             |
-| Last End                  | The total time for the last minor fragment to finish its task.                                                                                                              |
-| Min Runtime               | The minimum of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
-| Avg Runtime               | The average of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
-| Max Runtime               | The maximum of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
-| Last Update               | The last time one of the minor fragments sent a status update to the Foreman. Time is shown in 24-hour notation.                                                            |
-| Last Progress             | The last time one of the minor fragments made progress, such as a change in fragment state or read data from disk. Time is shown in 24-hour notation.                       |
-| Max Peak Memory           | The maximum of the peak direct memory allocated to any minor fragment.                                                                                                      |
-
-## Major Fragment Block  
-
-Shows metrics for the minor fragments that were parallelized for each major fragment.  
-
-The following table lists descriptions for each column in a major fragment block:  
-
-| Column Name       | Description                                                                                                                                                                                                        |
-|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Minor Fragment ID | The coordinate ID of the minor fragment that was parallelized from the major fragment. For example, 02-03-xx where 02 is the Major Fragment ID, 03 is the Minor Fragment ID, and xx corresponds to an operator ID. |
-| Host              | The node on which the minor fragment carried out its task.                                                                                                                                                         |
-| Start             | The amount of time passed before the minor fragment started its task.                                                                                                                                              |
-| End               | The amount of time passed before the minor fragment finished its task.                                                                                                                                             |
-| Runtime           | The duration of time for the fragment to complete a task. This value equals the difference between End and Start time.                                                                                             |
-| Max Records       | The maximum number of records consumed by an operator from a single input stream.                                                                                                                                  |
-| Max Batches       | The maximum number of input batches across input streams, operators, and minor fragments.                                                                                                                          |
-| Last Update       | The last time this fragment sent a status update to the Foreman. Time is shown in 24-hour notation.                                                                                                                |
-| Last Progress     | The last time this fragment made progress, such as a change in fragment state or reading data from disk. Time is shown in 24-hour notation.                                                                        |
-| Peak Memory       | The peak direct memory allocated during execution for this minor fragment.                                                                                                                                         |
-| State             | The status of the minor fragment; either finished, running, cancelled, or failed.                                                                                                                                  |
-
-
-## Operator Overview  Table  
-
-Shows aggregate metrics for each operator within a major fragment that performed relational operations during query execution.
- 
-The following table lists descriptions for each column in the Operator Overview table:
-
-| Column Name                                          | Description                                                                                                                                                                                                                   |
-|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Operator ID                                          | The coordinates of an operator that performed an operation during a particular phase of the query. For example, 02-xx-03 where 02 is the Major Fragment ID, xx corresponds to a Minor Fragment ID, and 03 is the Operator ID. |
-| Type                                                 | The operator type. Operators can be of type project, filter, hash join, single sender, or unordered receiver.                                                                                                                 |
-| Min Setup Time, Avg Setup Time, Max Setup Time       | The minimum, average, and maximum amount of time spent by the operator to set up before performing the operation.                                                                                                             |
-| Min Process Time, Avg Process Time, Max Process Time | The minimum, average, and maximum  amount of time spent by the operator to perform the operation.                                                                                                                             |
-| Wait (min, avg, max)                                 | These fields represent the minimum, average,  and maximum cumulative times spent by operators waiting for external resources.                                                                                                 |
-| Avg Peak Memory                                      | Represents the average of the peak direct memory allocated across minor fragments. Relates to the memory needed by operators to perform their operations, such as hash join or sort.                                          |
-| Max Peak Memory                                      | Represents the maximum of the peak direct memory allocated across minor fragments. Relates to the memory needed by operators to perform their operations, such as  hash join or sort.                                         |  
-
-## Operator Block  
-
-Shows time and memory metrics for each operator type within a major fragment.  
-
-The following table provides descriptions for each column presented in the operator block:  
-
-| Column Name    | Description                                                                                                                                                                                              |
-|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Minor Fragment | The coordinate ID of the minor fragment on which the operator ran. For example, 04-03-01 where 04 is the Major Fragment ID, 03 is the Minor Fragment ID, and 01 is the Operator ID.                      |
-| Setup Time     | The amount of time spent by the operator to set up before performing its operation. This includes run-time code generation and opening a file.                                                           |
-| Process Time   | The amount of time spent by the operator to perform its operation.                                                                                                                                       |
-| Wait Time      | The cumulative amount of time spent by an operator waiting for external resources. such as waiting to send records, waiting to receive records, waiting to write to disk, and waiting to read from disk. |
-| Max Batches    | The maximum number of record batches consumed from a single input stream.                                                                                                                                |
-| Max Records    | The maximum number of records consumed from a single input stream.                                                                                                                                       |
-| Peak Memory    | Represents the peak direct memory allocated. Relates to the memory needed by the operators to perform their operations, such as  hash join and sort.                                                     |  
-
-

http://git-wip-us.apache.org/repos/asf/drill/blob/e643acbf/_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md b/_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md
new file mode 100644
index 0000000..7a3db58
--- /dev/null
+++ b/_docs/performance-tuning/performance-tuning-reference/010-query-profile-tables.md
@@ -0,0 +1,84 @@
+---
+title: "Query Profile Column Descriptions"
+parent: "Performance Tuning Reference"
+--- 
+
+The following tables provide descriptions of the columns listed in each of the tables for a query profile.  
+
+
+## Fragment Overview Table  
+
+Shows aggregate metrics for each major fragment that executed the query.
+
+The following table lists descriptions for each column in the Fragment Overview table:  
+
+| Column Name               | Description                                                                                                                                                                 |
+|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Major Fragment ID         | The coordinate ID of the major fragment. For example, 03-xx-xx where 03 is the major fragment ID followed by xx-xx, which represents the minor fragment ID and operator ID. |
+| Minor Fragments Reporting | The number of minor fragments that Drill parallelized for the major fragment.                                                                                               |
+| First Start               | The total time before the first minor fragment started its task.                                                                                                            |
+| Last Start                | The total time before the last minor fragment started its task.                                                                                                             |
+| First End                 | The total time for the first minor fragment to finish its task.                                                                                                             |
+| Last End                  | The total time for the last minor fragment to finish its task.                                                                                                              |
+| Min Runtime               | The minimum of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
+| Avg Runtime               | The average of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
+| Max Runtime               | The maximum of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
+| Last Update               | The last time one of the minor fragments sent a status update to the Foreman. Time is shown in 24-hour notation.                                                            |
+| Last Progress             | The last time one of the minor fragments made progress, such as a change in fragment state or read data from disk. Time is shown in 24-hour notation.                       |
+| Max Peak Memory           | The maximum of the peak direct memory allocated to any minor fragment.                                                                                                      |
+
+## Major Fragment Block  
+
+Shows metrics for the minor fragments that were parallelized for each major fragment.  
+
+The following table lists descriptions for each column in a major fragment block:  
+
+| Column Name       | Description                                                                                                                                                                                                        |
+|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Minor Fragment ID | The coordinate ID of the minor fragment that was parallelized from the major fragment. For example, 02-03-xx where 02 is the Major Fragment ID, 03 is the Minor Fragment ID, and xx corresponds to an operator ID. |
+| Host              | The node on which the minor fragment carried out its task.                                                                                                                                                         |
+| Start             | The amount of time passed before the minor fragment started its task.                                                                                                                                              |
+| End               | The amount of time passed before the minor fragment finished its task.                                                                                                                                             |
+| Runtime           | The duration of time for the fragment to complete a task. This value equals the difference between End and Start time.                                                                                             |
+| Max Records       | The maximum number of records consumed by an operator from a single input stream.                                                                                                                                  |
+| Max Batches       | The maximum number of input batches across input streams, operators, and minor fragments.                                                                                                                          |
+| Last Update       | The last time this fragment sent a status update to the Foreman. Time is shown in 24-hour notation.                                                                                                                |
+| Last Progress     | The last time this fragment made progress, such as a change in fragment state or reading data from disk. Time is shown in 24-hour notation.                                                                        |
+| Peak Memory       | The peak direct memory allocated during execution for this minor fragment.                                                                                                                                         |
+| State             | The status of the minor fragment; either finished, running, cancelled, or failed.                                                                                                                                  |
+
+
+## Operator Overview Table  
+
+Shows aggregate metrics for each operator within a major fragment that performed relational operations during query execution.
+ 
+The following table lists descriptions for each column in the Operator Overview table:
+
+| Column Name                                          | Description                                                                                                                                                                                                                   |
+|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Operator ID                                          | The coordinates of an operator that performed an operation during a particular phase of the query. For example, 02-xx-03 where 02 is the Major Fragment ID, xx corresponds to a Minor Fragment ID, and 03 is the Operator ID. |
+| Type                                                 | The operator type. Operators can be of type project, filter, hash join, single sender, or unordered receiver.                                                                                                                 |
+| Min Setup Time, Avg Setup Time, Max Setup Time       | The minimum, average, and maximum amount of time spent by the operator to set up before performing the operation.                                                                                                             |
+| Min Process Time, Avg Process Time, Max Process Time | The minimum, average, and maximum  amount of time spent by the operator to perform the operation.                                                                                                                             |
+| Wait (min, avg, max)                                 | These fields represent the minimum, average,  and maximum cumulative times spent by operators waiting for external resources.                                                                                                 |
+| Avg Peak Memory                                      | Represents the average of the peak direct memory allocated across minor fragments. Relates to the memory needed by operators to perform their operations, such as hash join or sort.                                          |
+| Max Peak Memory                                      | Represents the maximum of the peak direct memory allocated across minor fragments. Relates to the memory needed by operators to perform their operations, such as  hash join or sort.                                         |  
+
+## Operator Block  
+
+Shows time and memory metrics for each operator type within a major fragment.  
+
+The following table provides descriptions for each column presented in the operator block:  
+
+| Column Name    | Description                                                                                                                                                                                              |
+|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Minor Fragment | The coordinate ID of the minor fragment on which the operator ran. For example, 04-03-01 where 04 is the Major Fragment ID, 03 is the Minor Fragment ID, and 01 is the Operator ID.                      |
+| Setup Time     | The amount of time spent by the operator to set up before performing its operation. This includes run-time code generation and opening a file.                                                           |
+| Process Time   | The amount of time spent by the operator to perform its operation.                                                                                                                                       |
+| Wait Time      | The cumulative amount of time spent by an operator waiting for external resources, such as waiting to send records, waiting to receive records, waiting to write to disk, and waiting to read from disk. |
+| Max Batches    | The maximum number of record batches consumed from a single input stream.                                                                                                                                |
+| Max Records    | The maximum number of records consumed from a single input stream.                                                                                                                                       |
+| Peak Memory    | Represents the peak direct memory allocated. Relates to the memory needed by the operators to perform their operations, such as  hash join and sort.                                                     |  
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/e643acbf/_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md b/_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md
new file mode 100644
index 0000000..36d1928
--- /dev/null
+++ b/_docs/performance-tuning/performance-tuning-reference/020-physical-operators.md
@@ -0,0 +1,115 @@
+---
+title: "Physical Operators"
+parent: "Performance Tuning Reference"
+--- 
+
+This document describes the physical operators that Drill uses in query plans.
+
+## Distribution Operators  
+
+Drill uses the following operators to perform data distribution over the network:  
+
+* HashToRandomExchange
+* HashToMergeExchange
+* UnionExchange
+* SingleMergeExchange
+* BroadcastExchange
+* UnorderedMuxExchange
+
+## Join Operators  
+
+Drill uses the following operators:
+
+| Operator         | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                |
+|------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Hash Join        | A Hash Join is used for inner joins, left, right and full outer joins.  A hash table is built on the rows produced by the inner child of the Hash Join.  The outer child rows are used to probe the hash table and find matches. This operator holds the entire dataset for the right hand side of the join in memory, which could be up to 2 billion records per minor fragment.                                                                          |
+| Merge Join       | A Merge Join is used for inner join, left and right outer joins.  Inputs to the Merge Join must be sorted. It reads the sorted input streams from both sides and finds matching rows.  This operator holds the amount of memory of one incoming record batch from each side of the join.   In addition, if there are repeating values in the right hand side of the join, the Merge Join will hold record batches for as long as a repeated value extends. |
+| Nested Loop Join | A Nested Loop Join is used for certain types of cartesian joins and inequality joins.                                                                                                                                                                                                                                                                                                                                                                      |  
+
+## Aggregate Operators  
+
+Drill uses the following aggregate operators:  
+
+| Operator            | Description                                                                                                                                                                                                                                                                                                                                                                                                                           |
+|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Hash Aggregate      | A Hash Aggregate performs grouped aggregation on the input data by building a hash table on the GROUP-BY keys and computing the aggregate values within each group. This operator holds memory for each aggregation grouping and each aggregate value, up to 2 billion values per minor fragment.                                                                                                                                     |
+| Streaming Aggregate | A Streaming Aggregate performs grouped aggregation and non-grouped aggregation.  For grouped aggregation, the data must be sorted on the GROUP-BY keys.  Aggregate values are computed within each group.  For non-grouped aggregation, data does not have to be sorted. This operator maintains a single aggregate grouping (keys and aggregate intermediate values) at a time in addition to the size of one incoming record batch. |  
+
+## Sort and Limit Operators  
+
+Drill uses the following sort and limit operators:  
+
+| Operator     | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                |
+|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Sort         | A Sort operator is used to perform an ORDER BY and as an upstream operator for other  operations that require sorted data such as Merge Join, Streaming Aggregate.                                                                                                                                                                                                                                                                                                         |
+| ExternalSort | The ExternalSort operator can potentially hold the entire dataset in memory.  This operator will also start spooling to the disk in the case that there is memory pressure.  In this case, the external sort will continue to try to use as much memory as available.  In all cases, external sort will hold at least one record batch in memory for each record spill.  Spills are currently sized based on the amount of memory available to the external sort operator. |
+| TopN         | A TopN operator is used to perform an ORDER BY with LIMIT.                                                                                                                                                                                                                                                                                                                                                                                                                 |
+| Limit        | A Limit operator is used to restrict the number of rows to a value specified by the LIMIT clause.                                                                                                                                                                                                                                                                                                                                                                          |  
+
+## Projection Operators  
+
+Drill uses the following projection operators: 
+
+| Operator     | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                |
+|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Project      | A Project operator projects columns and/or expressions involving columns and constants. This operator holds one incoming record batch plus any additional materialized projects for the same number of rows as the incoming record batch.                                                                                                                                                                                                                                  |
+| ExternalSort | The ExternalSort operator can potentially hold the entire dataset in memory.  This operator will also start spooling to the disk in the case that there is memory pressure.  In this case, the external sort will continue to try to use as much memory as available.  In all cases, external sort will hold at least one record batch in memory for each record spill.  Spills are currently sized based on the amount of memory available to the external sort operator. |
+| TopN         | A TopN operator is used to perform an ORDER BY with LIMIT.                                                                                                                                                                                                                                                                                                                                                                                                                 |
+| Limit        | A Limit operator is used to restrict the number of rows to a value specified by the LIMIT clause.                                                                                                                                                                                                                                                                                                                                                                          |  
+
+## Filter and Related Operators  
+
+Drill uses the following filter and related operators:  
+
+| Operator               | Description                                                                                                                                                                                                                                                                                                                                                                                      |
+|------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Filter                 | A Filter operator is used to evaluate the WHERE clause and HAVING clause predicates.  These predicates may consist of join predicates as well as single table predicates.  The join predicates are evaluated by a join operator and the remaining predicates are evaluated by the Filter operator. The amount of memory it consumes is slightly more than the size of one incoming record batch. |
+| SelectionVectorRemover | A SelectionVectorRemover is used in conjunction with either a Sort or Filter operator.  This operator maintains roughly twice the amount of memory as required by a single incoming record batch.                                                                                                                                                                                                |  
+
+## Set Operators  
+
+Drill uses the following set operators:  
+
+| Operator  | Description                                                                                                                                                                                                                                                                                                     |
+|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Union-All | A Union-All operator accepts rows from 2 input streams and produces a single output stream where the left input rows are emitted first followed by the right input rows. The column names of the output stream are inherited from the left input.  The column types of the two child inputs must be compatible. |  
+
+## Scan Operators  
+
+Drill uses the following scan operators:    
+
+| Operator | Description                                                                                                                                                                                 |
+|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Scan     | Performs a scan of the underlying table.  The table may be in one of several formats, such as Parquet, Text, JSON, and so on. The Scan operator encapsulates the formats into one operator. |  
+
+## Receiver Operators 
+
+Drill uses the following receiver operators: 
+
+| Operator          | Description                                                                                                                                                         |
+|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| UnorderedReceiver | The unordered receiver operator can hold up to 5 incoming record batches.                                                                                           |
+| MergingReceiver   | This operator holds up to 5 record batches for each incoming stream (generally either number of nodes or number of sending fragments, depending on use of muxxing). |  
+
+## Sender Operators  
+
+Drill uses the following sender operators:  
+
+| Operator        | Description                                                                                                                                                                                                                                                                                 |
+|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| PartitionSender | The PartitionSender operator maintains a queue for each outbound destination. The number of queues may be either the number of outbound minor fragments or the number of nodes, depending on the use of muxxing operations. Each queue may store up to 3 record batches for each destination. |  
+
+## File Writers  
+
+Drill uses the following file writers:  
+
+| Operator          | Description                                                                                                                                    |
+|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
+| ParquetFileWriter | The ParquetFileWriter buffers approximately twice the default Parquet row group size in memory per minor fragment (default in Drill is 512mb). |
+
+
+
+
+ 
+
+

http://git-wip-us.apache.org/repos/asf/drill/blob/e643acbf/_docs/sql-reference/sql-functions/073-log-and-debug.md
----------------------------------------------------------------------
diff --git a/_docs/sql-reference/sql-functions/073-log-and-debug.md b/_docs/sql-reference/sql-functions/073-log-and-debug.md
deleted file mode 100644
index 142d6b6..0000000
--- a/_docs/sql-reference/sql-functions/073-log-and-debug.md
+++ /dev/null
@@ -1,3 +0,0 @@
----
-title: "Log and Debug"
----
\ No newline at end of file


[10/14] drill git commit: new text reader features

Posted by ts...@apache.org.
new text reader features


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/09c357f6
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/09c357f6
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/09c357f6

Branch: refs/heads/gh-pages
Commit: 09c357f68d5c902416cb7501b20f0730af6aebb4
Parents: e643acb
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 20:35:53 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 20:35:53 2015 -0700

----------------------------------------------------------------------
 .../035-plugin-configuration-introduction.md    | 152 +++++++++++++++++--
 1 file changed, 138 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/09c357f6/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
index bc4c7e6..e98c8ef 100644
--- a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
+++ b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
@@ -25,7 +25,7 @@ The following diagram of the dfs storage plugin briefly describes options you co
 
 ![dfs plugin]({{ site.baseurl }}/docs/img/connect-plugin.png)
 
-The following table describes the attributes you configure for storage plugins in more detail than the diagram. 
+The following table describes the attributes you configure for storage plugins. 
 
 <table>
   <tr>
@@ -38,71 +38,195 @@ The following table describes the attributes you configure for storage plugins i
     <td>"type"</td>
     <td>"file"<br>"hbase"<br>"hive"<br>"mongo"</td>
     <td>yes</td>
-    <td>The storage plugin type name supported by Drill.</td>
+    <td>A valid storage plugin type name.</td>
   </tr>
   <tr>
     <td>"enabled"</td>
     <td>true<br>false</td>
     <td>yes</td>
-    <td>The state of the storage plugin.</td>
+    <td>State of the storage plugin.</td>
   </tr>
   <tr>
     <td>"connection"</td>
     <td>"classpath:///"<br>"file:///"<br>"mongodb://localhost:27017/"<br>"maprfs:///"</td>
     <td>implementation-dependent</td>
-    <td>The type of distributed file system. Drill can work with any distributed system, such as HDFS and S3, or files in your file system.</td>
+    <td>Type of distributed file system, such as HDFS, Amazon S3, or files in your file system.</td>
   </tr>
   <tr>
     <td>"workspaces"</td>
     <td>null<br>"logs"</td>
     <td>no</td>
-    <td>One or more unique workspace names, enclosed in double quotation marks. If a workspace is defined more than once, the latest one overrides the previous ones. Used with local or distributed file systems.</td>
+    <td>One or more unique workspace names. If a workspace is defined more than once, the latest one overrides the previous ones. Used with local or distributed file systems.</td>
   </tr>
   <tr>
     <td>"workspaces". . . "location"</td>
     <td>"location": "/"<br>"location": "/tmp"</td>
     <td>no</td>
-    <td>The path to a directory on the file system.</td>
+    <td>Path to a directory on the file system.</td>
   </tr>
   <tr>
     <td>"workspaces". . . "writable"</td>
     <td>true<br>false</td>
     <td>no</td>
-    <td>One or more unique workspace names, enclosed in double quotation marks. If a workspace is defined more than once, the latest one overrides the previous ones. Not used with local or distributed file systems.</td>
+    <td>Indicates whether or not the workspace is writable; a writable workspace allows operations such as CREATE TABLE AS (CTAS).</td>
   </tr>
   <tr>
     <td>"workspaces". . . "defaultInputFormat"</td>
     <td>null<br>"parquet"<br>"csv"<br>"json"</td>
     <td>no</td>
-    <td>The format of data Drill reads by default, regardless of extension. Parquet is the default.</td>
+    <td>Format for reading data, regardless of extension. Default = Parquet.</td>
   </tr>
   <tr>
     <td>"formats"</td>
     <td>"psv"<br>"csv"<br>"tsv"<br>"parquet"<br>"json"<br>"avro"<br>"maprdb" *</td>
     <td>yes</td>
-    <td>One or more file formats of data Drill can read. Drill can implicitly detect some file formats based on the file extension or the first few bits of data within the file, but you need to configure an option for others.</td>
+    <td>One or more valid file formats for reading. Drill implicitly detects formats of some files based on extension or bits of data in the file, others require configuration.</td>
   </tr>
   <tr>
     <td>"formats" . . . "type"</td>
     <td>"text"<br>"parquet"<br>"json"<br>"maprdb" *</td>
     <td>yes</td>
-    <td>The type of the format specified. For example, you can define two formats, csv and psv, as type "Text", but having different delimiters. </td>
+    <td>Format type. You can define two formats, csv and psv, as type "Text", but having different delimiters. </td>
   </tr>
   <tr>
     <td>formats . . . "extensions"</td>
     <td>["csv"]</td>
     <td>format-dependent</td>
-    <td>The extensions of the files that Drill can read.</td>
+    <td>Extensions of the files that Drill can read.</td>
   </tr>
   <tr>
     <td>"formats" . . . "delimiter"</td>
     <td>"\t"<br>","</td>
     <td>format-dependent</td>
-    <td>The delimiter used to separate columns in text files such as CSV. Specify a non-printable delimiter in the storage plugin config by using the form \uXXXX, where XXXX is the four numeral hex ascii code for the character.</td>
+    <td>One or more characters that separate records in a delimited text file, such as CSV. Use a 4-digit hex ascii code syntax \uXXXX for a non-printable delimiter. </td>
+  </tr>
+  <tr>
+    <td>"formats" . . . "fieldDelimiter"</td>
+    <td>","</td>
+    <td>no</td>
+    <td>A single character that separates each value in a column of a delimited text file.</td>
+  </tr>
+  <tr>
+    <td>"formats" . . . "quote"</td>
+    <td>"""</td>
+    <td>no</td>
+    <td>A single character that starts/ends a value in a delimited text file.</td>
+  </tr>
+  <tr>
+    <td>"formats" . . . "escape"</td>
+    <td>"`"</td>
+    <td>no</td>
+    <td>A single character that escapes the `quote` character.</td>
+  </tr>
+  <tr>
+    <td>"formats" . . . "comment"</td>
+    <td>"#"</td>
+    <td>no</td>
+    <td>The line decoration that starts a comment line in the delimited text file.</td>
+  </tr>
+  <tr>
+    <td>"formats" . . . "skipFirstLine"</td>
+    <td>true</td>
+    <td>no</td>
+    <td>Whether to skip the first line, such as a header, when reading a delimited text file.
+    </td>
   </tr>
 </table>
 
-\* Pertains only to distributed drill installations using the mapr-drill package.
+\* Pertains only to distributed drill installations using the mapr-drill package.  
+
+## Using the Formats
+
+You can use the following attributes when the `sys.options` property setting `exec.storage.enable_new_text_reader` is true (the default):
+
+* comment  
+* escape  
+* fieldDelimiter  
+* quote  
+* skipFirstLine
+
+The "formats" apply to all workspaces defined in a storage plugin. A typical use case defines separate storage plugins for different root directories to query the files stored below the directory. An alternative use case defines multiple formats within the same storage plugin and names target files using different extensions to match the formats.
+
+The following example of a storage plugin for reading CSV files with the new text reader includes two formats for reading files having either a `csv` or `csv2` extension. The text reader does include the first line of column names in the queries of `.csv` files but does not include it in queries of `.csv2` files. 
+
+    "csv": {
+      "type": "text",
+      "extensions": [
+        "csv"
+      ],  
+      "delimiter": "," 
+    },  
+    "csv_with_header": {
+      "type": "text",
+      "extensions": [
+        "csv2"
+      ],  
+      "comment": "&",
+      "skipFirstLine": true,
+      "delimiter": "," 
+    },  
+
+### How Formats Affect Output
+
+The following self-explanatory examples show how the output of queries look using different formats:
+
+    SELECT * FROM dfs.`/tmp/csv_no_header.csv`;
+    +------------------------+
+    |        columns         |
+    +------------------------+
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    +------------------------+
+    7 rows selected (0.112 seconds)
+
+    SELECT * FROM dfs.`/tmp/csv_with_comments.csv2`;
+    +------------------------+
+    |        columns         |
+    +------------------------+
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    +------------------------+
+    7 rows selected (0.111 seconds)
+
+    SELECT * FROM dfs.`/tmp/csv_with_escape.csv`;
+    +------------------------------------------------------------------------+
+    |                                columns                                 |
+    +------------------------------------------------------------------------+
+    | ["hello","1","2","3 \" double quote is the default escape character"]  |
+    | ["hello","1","2","3"]                                                  |
+    | ["hello","1","2","3"]                                                  |
+    | ["hello","1","2","3"]                                                  |
+    | ["hello","1","2","3"]                                                  |
+    | ["hello","1","2","3"]                                                  |
+    | ["hello","1","2","3"]                                                  |
+    +------------------------------------------------------------------------+
+    7 rows selected (0.104 seconds)
+
+    SELECT * FROM dfs.`/tmp/csv_with_header.csv2`;
+    +------------------------+
+    |        columns         |
+    +------------------------+
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    | ["hello","1","2","3"]  |
+    +------------------------+
+
+
+## Using Other Attributes
 
 The configuration of other attributes, such as `size.calculator.enabled` in the hbase plugin and `configProps` in the hive plugin, are implementation-dependent and beyond the scope of this document.
 
@@ -113,7 +237,7 @@ As previously mentioned, workspace and storage plugin names are case-sensitive.
 
 For example, using uppercase letters in the query after defining the storage plugin and workspace names using lowercase letters does not work. 
 
-## REST API
+## Storage Plugin REST API
 
 Drill provides a REST API that you can use to create a storage plugin. Use an HTTP POST and pass two properties:
 


[02/14] drill git commit: Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages

Posted by ts...@apache.org.
Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/63efb978
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/63efb978
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/63efb978

Branch: refs/heads/gh-pages
Commit: 63efb9787280033794c479f02cd7f1a350bd03e1
Parents: 2c0fb63 9c99ecf
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 11:56:22 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 11:56:22 2015 -0700

----------------------------------------------------------------------
 ...19-the-apache-software-foundation-announces-apache-drill-1.0.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------



[06/14] drill git commit: BB adds query exec, Aman's review

Posted by ts...@apache.org.
BB adds query exec, Aman's review


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/f4968c30
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/f4968c30
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/f4968c30

Branch: refs/heads/gh-pages
Commit: f4968c30841d86c4178974e172c09d854a31f42f
Parents: 9c24b34
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 13:24:16 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 13:24:16 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 |  75 ++++++++++++++++---
 .../010-architecture-introduction.md            |  15 +---
 _docs/architecture/015-drill-query-execution.md |  65 ++++++++++++++++
 _docs/img/client-phys-plan.png                  | Bin 0 -> 13083 bytes
 _docs/img/ex-operator.png                       | Bin 0 -> 8582 bytes
 _docs/img/execution-tree.PNG                    | Bin 0 -> 13849 bytes
 _docs/img/leaf-frag.png                         | Bin 0 -> 13577 bytes
 _docs/img/min-frag.png                          | Bin 0 -> 14425 bytes
 _docs/img/operators.png                         | Bin 0 -> 45966 bytes
 _docs/img/query-flow-client.png                 | Bin 0 -> 13734 bytes
 .../040-modifying-query-planning-options.md     |   8 +-
 ...esson-3-run-queries-on-complex-data-types.md |   3 +-
 12 files changed, 132 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index eefb5b8..8185d14 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -417,8 +417,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Core Modules", 
-                    "next_url": "/docs/core-modules/", 
+                    "next_title": "Drill Query Execution", 
+                    "next_url": "/docs/drill-query-execution/", 
                     "parent": "Architecture", 
                     "previous_title": "Architecture", 
                     "previous_url": "/docs/architecture/", 
@@ -434,11 +434,28 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Performance", 
-                    "next_url": "/docs/performance/", 
+                    "next_title": "Core Modules", 
+                    "next_url": "/docs/core-modules/", 
                     "parent": "Architecture", 
                     "previous_title": "Architecture Introduction", 
                     "previous_url": "/docs/architecture-introduction/", 
+                    "relative_path": "_docs/architecture/015-drill-query-execution.md", 
+                    "title": "Drill Query Execution", 
+                    "url": "/docs/drill-query-execution/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Architecture", 
+                            "url": "/docs/architecture/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Performance", 
+                    "next_url": "/docs/performance/", 
+                    "parent": "Architecture", 
+                    "previous_title": "Drill Query Execution", 
+                    "previous_url": "/docs/drill-query-execution/", 
                     "relative_path": "_docs/architecture/020-core-modules.md", 
                     "title": "Core Modules", 
                     "url": "/docs/core-modules/"
@@ -478,8 +495,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Core Modules", 
-            "next_url": "/docs/core-modules/", 
+            "next_title": "Drill Query Execution", 
+            "next_url": "/docs/drill-query-execution/", 
             "parent": "Architecture", 
             "previous_title": "Architecture", 
             "previous_url": "/docs/architecture/", 
@@ -1716,8 +1733,8 @@
             "next_title": "Performance", 
             "next_url": "/docs/performance/", 
             "parent": "Architecture", 
-            "previous_title": "Architecture Introduction", 
-            "previous_url": "/docs/architecture-introduction/", 
+            "previous_title": "Drill Query Execution", 
+            "previous_url": "/docs/drill-query-execution/", 
             "relative_path": "_docs/architecture/020-core-modules.md", 
             "title": "Core Modules", 
             "url": "/docs/core-modules/"
@@ -2763,6 +2780,23 @@
             "title": "Drill Plan Syntax", 
             "url": "/docs/drill-plan-syntax/"
         }, 
+        "Drill Query Execution": {
+            "breadcrumbs": [
+                {
+                    "title": "Architecture", 
+                    "url": "/docs/architecture/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Core Modules", 
+            "next_url": "/docs/core-modules/", 
+            "parent": "Architecture", 
+            "previous_title": "Architecture Introduction", 
+            "previous_url": "/docs/architecture-introduction/", 
+            "relative_path": "_docs/architecture/015-drill-query-execution.md", 
+            "title": "Drill Query Execution", 
+            "url": "/docs/drill-query-execution/"
+        }, 
         "Drill in 10 Minutes": {
             "breadcrumbs": [
                 {
@@ -10371,8 +10405,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Core Modules", 
-                    "next_url": "/docs/core-modules/", 
+                    "next_title": "Drill Query Execution", 
+                    "next_url": "/docs/drill-query-execution/", 
                     "parent": "Architecture", 
                     "previous_title": "Architecture", 
                     "previous_url": "/docs/architecture/", 
@@ -10388,11 +10422,28 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Performance", 
-                    "next_url": "/docs/performance/", 
+                    "next_title": "Core Modules", 
+                    "next_url": "/docs/core-modules/", 
                     "parent": "Architecture", 
                     "previous_title": "Architecture Introduction", 
                     "previous_url": "/docs/architecture-introduction/", 
+                    "relative_path": "_docs/architecture/015-drill-query-execution.md", 
+                    "title": "Drill Query Execution", 
+                    "url": "/docs/drill-query-execution/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Architecture", 
+                            "url": "/docs/architecture/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Performance", 
+                    "next_url": "/docs/performance/", 
+                    "parent": "Architecture", 
+                    "previous_title": "Drill Query Execution", 
+                    "previous_url": "/docs/drill-query-execution/", 
                     "relative_path": "_docs/architecture/020-core-modules.md", 
                     "title": "Core Modules", 
                     "url": "/docs/core-modules/"

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/architecture/010-architecture-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/010-architecture-introduction.md b/_docs/architecture/010-architecture-introduction.md
old mode 100644
new mode 100755
index e80af26..7c06469
--- a/_docs/architecture/010-architecture-introduction.md
+++ b/_docs/architecture/010-architecture-introduction.md
@@ -29,20 +29,7 @@ Though Drill works in a Hadoop cluster environment, Drill is not tied to
 Hadoop and can run in any distributed cluster environment. The only pre-
 requisite for Drill is Zookeeper.
 
-## Query Flow in Drill
-
-The following image represents the flow of a Drill query:
- 
-![drill query flow]({{ site.baseurl }}/docs/img/queryFlow.png)
-
-The flow of a Drill query typically involves the following steps:
-
-  1. The Drill client issues a query. Any Drillbit in the cluster can accept queries from clients. There is no master-slave concept.
-  2. The Drillbit then parses the query, optimizes it, and generates an optimized distributed query plan for fast and efficient execution.
-  3. The Drillbit that accepts the query becomes the driving Drillbit node for the request. It gets a list of available Drillbit nodes in the cluster from ZooKeeper. The driving Drillbit determines the appropriate nodes to execute various query plan fragments to maximize data locality.
-  4. The Drillbit schedules the execution of query fragments on individual nodes according to the execution plan.
-  5. The individual nodes finish their execution and return data to the driving Drillbit.
-  6. The driving Drillbit returns results back to the client.
+See Drill Query Execution.
 
 ## Drill Clients
 

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/architecture/015-drill-query-execution.md
----------------------------------------------------------------------
diff --git a/_docs/architecture/015-drill-query-execution.md b/_docs/architecture/015-drill-query-execution.md
new file mode 100755
index 0000000..730460a
--- /dev/null
+++ b/_docs/architecture/015-drill-query-execution.md
@@ -0,0 +1,65 @@
+---
+title: "Drill Query Execution"
+parent: "Architecture"
+---
+
+When you submit a Drill query, a client or an application sends the query in the form of an SQL statement to a Drillbit in the Drill cluster. A Drillbit is the process running on each active Drill node that coordinates, plans, and executes queries, as well as distributes query work across the cluster to maximize data locality.
+
+The following image represents the communication between clients, applications, and Drillbits:
+
+![]({{ site.baseurl }}/docs/img/query-flow-client.png)
+
+The Drillbit that receives the query from a client or application becomes the Foreman for the query and drives the entire query. A parser in the Foreman parses the SQL, applying custom rules to convert specific SQL operators into a specific logical operator syntax that Drill understands. This collection of logical operators forms a logical plan. The logical plan describes the work required to generate the query results and defines what data sources and operations to apply.
+
+The Foreman sends the logical plan into a cost-based optimizer to optimize the order of SQL operators in a statement and read the logical plan. The optimizer applies various types of rules to rearrange operators and functions into an optimal plan. The optimizer converts the logical plan into a physical plan that describes how to execute the query.
+
+![]({{ site.baseurl }}/docs/img/client-phys-plan.png)
+
+A parallelizer in the Foreman transforms the physical plan into multiple phases, called major and minor fragments. These fragments create a multi-level execution tree that rewrites the query and executes it in parallel against the configured data sources, sending the results back to the client or application.
+
+![]({{ site.baseurl }}/docs/img/execution-tree.png)  
+
+
+## Major Fragments
+A major fragment is an abstract concept that represents a phase of the query execution. A phase can consist of one or multiple operations that Drill must perform to execute the query. Drill assigns each major fragment a MajorFragmentID.
+
+For example, to perform a hash aggregation of two files, Drill may create a plan with two major phases (major fragments) where the first phase is dedicated to scanning the two files and the second phase is dedicated to the aggregation of the data.  
+
+![]({{ site.baseurl }}/docs/img/ex-operator.png)
+
+Drill separates major fragments by an exchange operator. An exchange is a change in data location and/or parallelization of the physical plan. An exchange is composed of a sender and a receiver to allow data to move between nodes. 
+
+Major fragments do not actually perform any query tasks. Each major fragment is divided into one or multiple minor fragments (discussed in the next section) that actually execute the operations required to complete the query and return results back to the client.
+
+You can interact with major fragments within the physical plan by capturing a JSON representation of the plan in a file, manually modifying it, and then submitting it back to Drill using the SUBMIT PLAN command. You can also view major fragments in the query profile, which is visible in the Drill Web UI. See [EXPLAIN ]({{ site.baseurl }}/docs/explain/)and [Query Profiles]({{ site.baseurl }}/docs/query-profiles/) for more information.
+
+## Minor Fragments
+Each major fragment is parallelized into minor fragments. A minor fragment is a logical unit of work that runs inside of a thread. A logical unit of work in Drill is also referred to as a slice. The execution plan that Drill creates is composed of minor fragments. Drill assigns each minor fragment a MinorFragmentID.  
+
+![]({{ site.baseurl }}/docs/img/min-frag.png)
+
+The parallelizer in the Foreman creates one or more minor fragments from a major fragment at execution time, by breaking a major fragment into as many minor fragments as it can run simultaneously on the cluster.
+
+Drill executes each minor fragment in its own thread as quickly as possible based on its upstream data requirements. Drill schedules the minor fragments on nodes with data locality. Otherwise, Drill schedules them in a round-robin fashion on the existing, available Drillbits.
+
+Minor fragments contain one or more relational operators. An operator performs a relational operation, such as scan, filter, join, or group by. Each operator has a particular operator type and an OperatorID. Each OperatorID defines its relationship within the minor fragment to which it belongs.  
+
+![]({{ site.baseurl }}/docs/img/operators.png)
+
+For example, when performing a hash aggregation of two files, Drill breaks the first phase dedicated to scanning into two minor fragments. Each minor fragment contains scan operators that scan the files. Drill breaks the second phase dedicated to aggregation into four minor fragments. Each of the four minor fragments contain hash aggregate operators that perform the hash  aggregation operations on the data. 
+
+You cannot modify the number of minor fragments within the execution plan. However, you can view the query profile in the Drill Web UI and modify some configuration options that change the behavior of minor fragments, such as the maximum number of slices. See [Configuration Options]({{ site.baseurl }}/docs/configuration-options-introduction/) for more information.
+
+### Execution of Minor Fragments
+Minor fragments can run as root, intermediate, or leaf fragments. An execution tree contains only one root fragment. The coordinates of the execution tree are numbered from the root, with the root being zero. Data flows downstream from the leaf fragments to the root fragment.
+ 
+The root fragment runs in the Foreman and receives incoming queries, reads metadata from tables, rewrites the queries and routes them to the next level in the serving tree. The other fragments become intermediate or leaf fragments.  
+
+Intermediate fragments start work when data is available or fed to them from other fragments. They perform operations on the data and then send the data downstream. They also pass the aggregated results to the root fragment, which performs further aggregation and provides the query results to the client or application.
+
+The leaf fragments scan tables in parallel and communicate with the storage layer or access data on local disk. The leaf fragments pass partial results to the intermediate fragments, which perform parallel operations on intermediate results.
+
+![]({{ site.baseurl }}/docs/leaf-frag.png)
+
+Drill only plans queries that have concurrent running fragments. For example, if 20 available slices exist in the cluster, Drill plans a query that runs no more than 20 minor fragments in a particular major fragment. Drill is optimistic and assumes that it can complete all of the work in parallel. All minor fragments for a particular major fragment start at the same time based on their upstream data dependency.
+

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/img/client-phys-plan.png
----------------------------------------------------------------------
diff --git a/_docs/img/client-phys-plan.png b/_docs/img/client-phys-plan.png
new file mode 100755
index 0000000..2314c8c
Binary files /dev/null and b/_docs/img/client-phys-plan.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/img/ex-operator.png
----------------------------------------------------------------------
diff --git a/_docs/img/ex-operator.png b/_docs/img/ex-operator.png
new file mode 100755
index 0000000..8a04af8
Binary files /dev/null and b/_docs/img/ex-operator.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/img/execution-tree.PNG
----------------------------------------------------------------------
diff --git a/_docs/img/execution-tree.PNG b/_docs/img/execution-tree.PNG
new file mode 100755
index 0000000..9fb8026
Binary files /dev/null and b/_docs/img/execution-tree.PNG differ

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/img/leaf-frag.png
----------------------------------------------------------------------
diff --git a/_docs/img/leaf-frag.png b/_docs/img/leaf-frag.png
new file mode 100755
index 0000000..5e3e973
Binary files /dev/null and b/_docs/img/leaf-frag.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/img/min-frag.png
----------------------------------------------------------------------
diff --git a/_docs/img/min-frag.png b/_docs/img/min-frag.png
new file mode 100755
index 0000000..20b13e4
Binary files /dev/null and b/_docs/img/min-frag.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/img/operators.png
----------------------------------------------------------------------
diff --git a/_docs/img/operators.png b/_docs/img/operators.png
new file mode 100755
index 0000000..12a7b3e
Binary files /dev/null and b/_docs/img/operators.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/img/query-flow-client.png
----------------------------------------------------------------------
diff --git a/_docs/img/query-flow-client.png b/_docs/img/query-flow-client.png
new file mode 100755
index 0000000..10fe24f
Binary files /dev/null and b/_docs/img/query-flow-client.png differ

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/performance-tuning/query-plans-and-tuning/040-modifying-query-planning-options.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/query-plans-and-tuning/040-modifying-query-planning-options.md b/_docs/performance-tuning/query-plans-and-tuning/040-modifying-query-planning-options.md
old mode 100755
new mode 100644
index fed5c25..f6fdb5d
--- a/_docs/performance-tuning/query-plans-and-tuning/040-modifying-query-planning-options.md
+++ b/_docs/performance-tuning/query-plans-and-tuning/040-modifying-query-planning-options.md
@@ -7,9 +7,8 @@ Planner options affect how Drill plans a query. You can use the ALTER SYSTEM|SES
  
 The following planning options affect query planning and performance:
 
-* **planner.width.max\_per_node** 
-
-     Default is 3. Configure this option to achieve fine grained, absolute control over parallelization.
+* **planner.width.max\_per_node**  
+     Configure this option to achieve fine grained, absolute control over parallelization.
 
      In this context width refers to fan out or distribution potential: the ability to run a query in parallel across the cores on a node and the nodes on a cluster. A physical plan consists of intermediate operations, known as query "fragments," that run concurrently, yielding opportunities for parallelism above and below each exchange operator in the plan. An exchange operator represents a breakpoint in the execution flow where processing can be distributed. For example, a single-process scan of a file may flow into an exchange operator, followed by a multi-process aggregation fragment.
  
@@ -19,15 +18,12 @@ The following planning options affect query planning and performance:
      When you modify the default setting, you can supply any meaningful number. The system does not automatically scale down your setting.  
 
 * **planner.width\_max\_per_query**  
-
      Default is 1000. The maximum number of threads than can run in parallel for a query across all nodes. Only change this setting when Drill over-parallelizes on very large clusters.
  
 * **planner.slice_target**  
-
      Default is 100000. The minimum number of estimated records to work with in a major fragment before applying additional parallelization.
  
 * **planner.broadcast_threshold**  
-
      Default is 10000000. The maximum number of records allowed to be broadcast as part of a join. After one million records, Drill reshuffles data rather than doing a broadcast to one side of the join. To improve performance you can increase this number, especially on 10GB Ethernet clusters.
  
 

http://git-wip-us.apache.org/repos/asf/drill/blob/f4968c30/_docs/tutorials/learn-drill-with-the-mapr-sandbox/050-lesson-3-run-queries-on-complex-data-types.md
----------------------------------------------------------------------
diff --git a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/050-lesson-3-run-queries-on-complex-data-types.md b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/050-lesson-3-run-queries-on-complex-data-types.md
index a41b4a4..17b904b 100644
--- a/_docs/tutorials/learn-drill-with-the-mapr-sandbox/050-lesson-3-run-queries-on-complex-data-types.md
+++ b/_docs/tutorials/learn-drill-with-the-mapr-sandbox/050-lesson-3-run-queries-on-complex-data-types.md
@@ -289,8 +289,7 @@ in descending order. Only clicks that have resulted in a purchase are counted.
   
 ## Store a Result Set in a Table for Reuse and Analysis
 
-Finally, run another correlated subquery that returns a fairly large result
-set. To facilitate additional analysis on this result set, you can easily and
+To facilitate additional analysis on this result set, you can easily and
 quickly create a Drill table from the results of the query.
 
 ### Continue to use the dfs.clicks workspace


[11/14] drill git commit: Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages

Posted by ts...@apache.org.
Merge branch 'gh-pages' of https://github.com/tshiran/drill into gh-pages


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/3cca29fe
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/3cca29fe
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/3cca29fe

Branch: refs/heads/gh-pages
Commit: 3cca29feccb307019e7892422527e740fa65277c
Parents: 09c357f 4fd1e79
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 20:41:26 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 20:41:26 2015 -0700

----------------------------------------------------------------------
 .../020-using-jdbc-with-squirrel-on-windows.md                     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------



[09/14] drill git commit: Updated the download link to 1.0.0

Posted by ts...@apache.org.
Updated the download link to 1.0.0


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/4fd1e791
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/4fd1e791
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/4fd1e791

Branch: refs/heads/gh-pages
Commit: 4fd1e791fc59e7948b90dae52b1d1a86e3ff7a9f
Parents: e643acb
Author: Bob Rumsby <br...@mapr.com>
Authored: Tue May 19 16:27:36 2015 -0700
Committer: Bob Rumsby <br...@mapr.com>
Committed: Tue May 19 16:27:36 2015 -0700

----------------------------------------------------------------------
 .../020-using-jdbc-with-squirrel-on-windows.md                     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/4fd1e791/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md
----------------------------------------------------------------------
diff --git a/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md b/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md
index be1b173..1c7d465 100755
--- a/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md
+++ b/_docs/odbc-jdbc-interfaces/020-using-jdbc-with-squirrel-on-windows.md
@@ -30,7 +30,7 @@ machine:
     <drill_installation_directory>/jars/jdbc-driver/drill-jdbc-all-<version>.jar
 
 Or, you can download the [apache-
-drill-0.9.0.tar.gz](http://apache.osuosl.org/drill/drill-0.9.0/apache-drill-0.9.0-src.tar.gz) file to a location on your Windows machine, and
+drill-1.0.0.tar.gz](http://apache.osuosl.org/drill/drill-1.0.0/apache-drill-1.0.0-src.tar.gz) file to a location on your Windows machine, and
 extract the contents of the file. You may need to use a decompression utility,
 such as [7-zip](http://www.7-zip.org/) to extract the archive. Once extracted,
 you can locate the driver in the following directory:


[03/14] drill git commit: release notes

Posted by ts...@apache.org.
release notes


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/cdb566ce
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/cdb566ce
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/cdb566ce

Branch: refs/heads/gh-pages
Commit: cdb566ce8c200298e8af0cd96dcd3756526db94f
Parents: 63efb97
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 11:58:55 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 11:58:55 2015 -0700

----------------------------------------------------------------------
 _data/docs.json          | 45 +++++++++++++++++++++++++++++--------------
 _docs/rn/080-1.0.0-rn.md |  2 +-
 2 files changed, 32 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/cdb566ce/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 7c6e8de..eefb5b8 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -321,14 +321,31 @@
                 }
             ], 
             "children": [], 
+            "next_title": "Apache Drill 1.0.0 Release Notes", 
+            "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
+            "parent": "Release Notes", 
+            "previous_title": "Apache Drill 0.8.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
+            "relative_path": "_docs/rn/070-0.9.0-rn copy.md", 
+            "title": "Apache Drill 0.9.0 Release Notes", 
+            "url": "/docs/apache-drill-0-9-0-release-notes/"
+        }, 
+        "Apache Drill 1.0.0 Release Notes": {
+            "breadcrumbs": [
+                {
+                    "title": "Release Notes", 
+                    "url": "/docs/release-notes/"
+                }
+            ], 
+            "children": [], 
             "next_title": "Sample Datasets", 
             "next_url": "/docs/sample-datasets/", 
             "parent": "Release Notes", 
             "previous_title": "Apache Drill 0.9.0 Release Notes", 
             "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
             "relative_path": "_docs/rn/080-1.0.0-rn.md", 
-            "title": "Apache Drill 0.9.0 Release Notes", 
-            "url": "/docs/apache-drill-0-9-0-release-notes/"
+            "title": "Apache Drill 1.0.0 Release Notes", 
+            "url": "/docs/apache-drill-1-0-0-release-notes/"
         }, 
         "Apache Drill Contribution Guidelines": {
             "breadcrumbs": [
@@ -6789,8 +6806,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Apache Drill 0.9.0 Release Notes", 
-                    "next_url": "/docs/apache-drill-0-9-0-release-notes/", 
+                    "next_title": "Apache Drill 1.0.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
@@ -6812,8 +6829,8 @@
                     "previous_title": "Apache Drill 0.9.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "relative_path": "_docs/rn/080-1.0.0-rn.md", 
-                    "title": "Apache Drill 0.9.0 Release Notes", 
-                    "url": "/docs/apache-drill-0-9-0-release-notes/"
+                    "title": "Apache Drill 1.0.0 Release Notes", 
+                    "url": "/docs/apache-drill-1-0-0-release-notes/"
                 }
             ], 
             "next_title": "Apache Drill 0.5.0 Release Notes", 
@@ -8932,8 +8949,8 @@
             "next_title": "AOL Search", 
             "next_url": "/docs/aol-search/", 
             "parent": "", 
-            "previous_title": "Apache Drill 0.9.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
+            "previous_title": "Apache Drill 1.0.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
             "relative_path": "_docs/140-sample-datasets.md", 
             "title": "Sample Datasets", 
             "url": "/docs/sample-datasets/"
@@ -14195,8 +14212,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Apache Drill 0.9.0 Release Notes", 
-                    "next_url": "/docs/apache-drill-0-9-0-release-notes/", 
+                    "next_title": "Apache Drill 1.0.0 Release Notes", 
+                    "next_url": "/docs/apache-drill-1-0-0-release-notes/", 
                     "parent": "Release Notes", 
                     "previous_title": "Apache Drill 0.8.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-8-0-release-notes/", 
@@ -14218,8 +14235,8 @@
                     "previous_title": "Apache Drill 0.9.0 Release Notes", 
                     "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
                     "relative_path": "_docs/rn/080-1.0.0-rn.md", 
-                    "title": "Apache Drill 0.9.0 Release Notes", 
-                    "url": "/docs/apache-drill-0-9-0-release-notes/"
+                    "title": "Apache Drill 1.0.0 Release Notes", 
+                    "url": "/docs/apache-drill-1-0-0-release-notes/"
                 }
             ], 
             "next_title": "Apache Drill 0.5.0 Release Notes", 
@@ -14289,8 +14306,8 @@
             "next_title": "AOL Search", 
             "next_url": "/docs/aol-search/", 
             "parent": "", 
-            "previous_title": "Apache Drill 0.9.0 Release Notes", 
-            "previous_url": "/docs/apache-drill-0-9-0-release-notes/", 
+            "previous_title": "Apache Drill 1.0.0 Release Notes", 
+            "previous_url": "/docs/apache-drill-1-0-0-release-notes/", 
             "relative_path": "_docs/140-sample-datasets.md", 
             "title": "Sample Datasets", 
             "url": "/docs/sample-datasets/"

http://git-wip-us.apache.org/repos/asf/drill/blob/cdb566ce/_docs/rn/080-1.0.0-rn.md
----------------------------------------------------------------------
diff --git a/_docs/rn/080-1.0.0-rn.md b/_docs/rn/080-1.0.0-rn.md
index 6c60b39..5755568 100755
--- a/_docs/rn/080-1.0.0-rn.md
+++ b/_docs/rn/080-1.0.0-rn.md
@@ -1,5 +1,5 @@
 ---
-title: "Apache Drill 0.9.0 Release Notes"
+title: "Apache Drill 1.0.0 Release Notes"
 parent: "Release Notes"
 ---
  Today we're happy to announce the availability of Drill 1.0.0, providing additional enhancements and bug fixes. This release includes the following new features, enhancements, and bug fixes:


[12/14] drill git commit: rewrite attribute definitions

Posted by ts...@apache.org.
rewrite attribute definitions


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/a37c3474
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/a37c3474
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/a37c3474

Branch: refs/heads/gh-pages
Commit: a37c3474aeb652236107afc5855068fc419a4902
Parents: 3cca29f
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 21:08:11 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 21:08:11 2015 -0700

----------------------------------------------------------------------
 .../035-plugin-configuration-introduction.md              | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/a37c3474/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
index e98c8ef..1ec6461 100644
--- a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
+++ b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
@@ -21,12 +21,10 @@ name. Names are case-sensitive.
 Click Update to reconfigure an existing, enabled storage plugin.
 
 ## Storage Plugin Attributes
-The following diagram of the dfs storage plugin briefly describes options you configure in a typical storage plugin configuration:
-
+The following graphic shows key attributes of a typical dfs storage plugin:  
 ![dfs plugin]({{ site.baseurl }}/docs/img/connect-plugin.png)
-
+## List of Attributes and Definitions
 The following table describes the attributes you configure for storage plugins. 
-
 <table>
   <tr>
     <th>Attribute</th>
@@ -56,7 +54,7 @@ The following table describes the attributes you configure for storage plugins.
     <td>"workspaces"</td>
     <td>null<br>"logs"</td>
     <td>no</td>
-    <td>One or more unique workspace names. If a workspace is defined more than once, the latest one overrides the previous ones. Used with local or distributed file systems.</td>
+    <td>One or more unique workspace names. If a workspace name is used more than once, only the last definition is effective. </td>
   </tr>
   <tr>
     <td>"workspaces". . . "location"</td>
@@ -116,7 +114,7 @@ The following table describes the attributes you configure for storage plugins.
     <td>"formats" . . . "escape"</td>
     <td>"`"</td>
     <td>no</td>
-    <td>A single character that escapes the `quote` character.</td>
+    <td>A single character that escapes the quote character.</td>
   </tr>
   <tr>
     <td>"formats" . . . "comment"</td>


[05/14] drill git commit: Sudheesh's review changes

Posted by ts...@apache.org.
Sudheesh's review changes


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/9c24b342
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/9c24b342
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/9c24b342

Branch: refs/heads/gh-pages
Commit: 9c24b342cc5f18b781da58c04dee5fc2058a0393
Parents: 39a2b6b
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 12:18:14 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 12:18:14 2015 -0700

----------------------------------------------------------------------
 _docs/query-data/070-query-sys-tbl.md | 409 ++++++++++++++++++-----------
 1 file changed, 251 insertions(+), 158 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/9c24b342/_docs/query-data/070-query-sys-tbl.md
----------------------------------------------------------------------
diff --git a/_docs/query-data/070-query-sys-tbl.md b/_docs/query-data/070-query-sys-tbl.md
index 5cab6dc..0016e21 100644
--- a/_docs/query-data/070-query-sys-tbl.md
+++ b/_docs/query-data/070-query-sys-tbl.md
@@ -1,158 +1,251 @@
----
-title: "Querying System Tables"
-parent: "Query Data"
----
-Drill has a sys database that contains system tables. You can query the system
-tables for information about Drill, including Drill ports, the Drill version
-running on the system, and available Drill options. View the databases in
-Drill to identify the sys database, and then use the sys database to view
-system tables that you can query.
-
-## View Drill Databases
-
-Issue the `SHOW DATABASES` command to view Drill databases.
-
-    0: jdbc:drill:zk=10.10.100.113:5181> show databases;
-    +--------------------+
-    |      SCHEMA_NAME   |
-    +--------------------+
-    | M7                 |
-    | hive.default       |
-    | dfs.default        |
-    | dfs.root           |
-    | dfs.views          |
-    | dfs.tmp            |
-    | dfs.tpcds          |
-    | sys                |
-    | cp.default         |
-    | hbase              |
-    | INFORMATION_SCHEMA |
-    +--------------------+
-    11 rows selected (0.162 seconds)
-
-Drill returns `sys` in the database results.
-
-## Use the Sys Database
-
-Issue the `USE` command to select the sys database for subsequent SQL
-requests.
-
-    0: jdbc:drill:zk=10.10.100.113:5181> use sys;
-    +------------+--------------------------------+
-    |   ok     |  summary                         |
-    +------------+--------------------------------+
-    | true     | Default schema changed to 'sys'  |
-    +------------+--------------------------------+
-    1 row selected (0.101 seconds)
-
-## View Tables
-
-Issue the `SHOW TABLES` command to view the tables in the sys database.
-
-    0: jdbc:drill:zk=10.10.100.113:5181> show tables;
-    +--------------+------------+
-    | TABLE_SCHEMA | TABLE_NAME |
-    +--------------+------------+
-    | sys          | drillbits  |
-    | sys          | version    |
-    | sys          | options    |
-    +--------------+------------+
-    3 rows selected (0.934 seconds)
-    0: jdbc:drill:zk=10.10.100.113:5181>
-
-## Query System Tables
-
-Query the drillbits, version, and options tables in the sys database.
-
-###Query the drillbits table.
-
-    0: jdbc:drill:zk=10.10.100.113:5181> select * from drillbits;
-    +-------------------+------------+--------------+------------+---------+
-    |   host            |  user_port | control_port | data_port  |  current|
-    +-------------------+------------+--------------+------------+--------+
-    | qa-node115.qa.lab | 31010      | 31011        | 31012      | true    |
-    | qa-node114.qa.lab | 31010      | 31011        | 31012      | false   |
-    | qa-node116.qa.lab | 31010      | 31011        | 31012      | false   |
-    +-------------------+------------+--------------+------------+---------+
-    3 rows selected (0.146 seconds)
-
-  * host   
-The name of the node running the Drillbit service.
-  * user-port  
-The user port address, used between nodes in a cluster for connecting to
-external clients and for the Drill Web UI.  
-  * control_port  
-The control port address, used between nodes for multi-node installation of
-Apache Drill.
-  * data_port  
-The data port address, used between nodes for multi-node installation of
-Apache Drill.
-  * current  
-True means the Drillbit is connected to the session or client running the
-query. This Drillbit is the Foreman for the current session.  
-
-### Query the version table.
-
-    0: jdbc:drill:zk=10.10.100.113:5181> select * from version;
-    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
-    |                 commit_id                 |                           commit_message                           |        commit_time         | build_email  |         build_time         |
-    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
-    | d8b19759657698581cc0d01d7038797952888123  | DRILL-3100: TestImpersonationDisabledWithMiniDFS fails on Windows  | 15.05.2015 @ 05:18:03 UTC  | Unknown      | 15.05.2015 @ 06:52:32 UTC  |
-    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
-    1 row selected (0.099 seconds)
-  * commit_id  
-The github id of the release you are running. For example, <https://github.com
-/apache/drill/commit/e3ab2c1760ad34bda80141e2c3108f7eda7c9104>
-  * commit_message  
-The message explaining the change.
-  * commit_time  
-The date and time of the change.
-  * build_email  
-The email address of the person who made the change, which is unknown in this
-example.
-  * build_time  
-The time that the release was built.
-
-### Query the options table.
-
-Drill provides system, session, and boot options that you can query.
-
-The following example shows a query on the system options:
-
-    0: jdbc:drill:zk=10.10.100.113:5181> select * from options where type='SYSTEM' limit 10;
-    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
-    |                      name                       |   kind   |  type   |  status  |   num_val   | string_val  | bool_val  | float_val  |
-    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
-    | drill.exec.functions.cast_empty_string_to_null  | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
-    | drill.exec.storage.file.partition.column.label  | STRING   | SYSTEM  | DEFAULT  | null        | dir         | null      | null       |
-    | exec.errors.verbose                             | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
-    | exec.java_compiler                              | STRING   | SYSTEM  | DEFAULT  | null        | DEFAULT     | null      | null       |
-    | exec.java_compiler_debug                        | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | true      | null       |
-    | exec.java_compiler_janino_maxsize               | LONG     | SYSTEM  | DEFAULT  | 262144      | null        | null      | null       |
-    | exec.max_hash_table_size                        | LONG     | SYSTEM  | DEFAULT  | 1073741824  | null        | null      | null       |
-    | exec.min_hash_table_size                        | LONG     | SYSTEM  | DEFAULT  | 65536       | null        | null      | null       |
-    | exec.queue.enable                               | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
-    | exec.queue.large                                | LONG     | SYSTEM  | DEFAULT  | 10          | null        | null      | null       |
-    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
-    10 rows selected (0.216 seconds)
-
-  * name  
-The name of the option.
-  * kind  
-The data type of the option value.
-  * type  
-The type of options in the output: system, session, or boot.
-  * num_val  
-The default value, which is of the long or int data type; otherwise, null.
-  * string_val  
-The default value, which is a string; otherwise, null.
-  * bool_val  
-The default value, which is true or false; otherwise, null.
-  * float_val  
-The default value, which is of the double, float, or long double data type;
-otherwise, null.
-
-For information about how to configure Drill system and session options, see [Planning and Execution Options]({{ site.baseurl }}/docs/planning-and-execution-options).
-
-For information about how to configure Drill start-up options, see [Start-Up Options]({{ site.baseurl }}/docs/start-up-options).
-
+---
+title: "Querying System Tables"
+parent: "Query Data"
+---
+Drill has a sys database that contains system tables. You can query the system
+tables for information about Drill, including Drill ports, the Drill version
+running on the system, and available Drill options. View the databases in
+Drill to identify the sys database, and then use the sys database to view
+system tables that you can query.
+
+## View Drill Databases
+
+Issue the `SHOW DATABASES` command to view Drill databases.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> show databases;
+    +--------------------+
+    |      SCHEMA_NAME   |
+    +--------------------+
+    | M7                 |
+    | hive.default       |
+    | dfs.default        |
+    | dfs.root           |
+    | dfs.views          |
+    | dfs.tmp            |
+    | dfs.tpcds          |
+    | sys                |
+    | cp.default         |
+    | hbase              |
+    | INFORMATION_SCHEMA |
+    +--------------------+
+    11 rows selected (0.162 seconds)
+
+Drill returns `sys` in the database results.
+
+## Use the Sys Database
+
+Issue the `USE` command to select the sys database for subsequent SQL
+requests.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> use sys;
+    +-------+----------------------------------+
+    |  ok   |             summary              |
+    +-------+----------------------------------+
+    | true  | Default schema changed to [sys]  |
+    +-------+----------------------------------+
+    1 row selected (0.101 seconds)
+
+## View Tables
+
+Issue the `SHOW TABLES` command to view the tables in the sys database.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> show tables;
+    +---------------+-------------+
+    | TABLE_SCHEMA  | TABLE_NAME  |
+    +---------------+-------------+
+    | sys           | boot        |
+    | sys           | drillbits   |
+    | sys           | memory      |
+    | sys           | options     |
+    | sys           | threads     |
+    | sys           | version     |
+    +---------------+-------------+
+    6 rows selected (0.934 seconds)
+    0: jdbc:drill:zk=10.10.100.113:5181>
+
+## Query System Tables
+
+Query the drillbits, version, options, boot, threads, and memory tables in the sys database.
+
+###Query the drillbits table.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> select * from drillbits;
+    +-------------------+------------+--------------+------------+---------+
+    |   hostname        |  user_port | control_port | data_port  |  current|
+    +-------------------+------------+--------------+------------+---------+
+    | qa-node115.qa.lab | 31010      | 31011        | 31012      | true    |
+    | qa-node114.qa.lab | 31010      | 31011        | 31012      | false   |
+    | qa-node116.qa.lab | 31010      | 31011        | 31012      | false   |
+    +-------------------+------------+--------------+------------+---------+
+    3 rows selected (0.146 seconds)
+
+  * hostname   
+The name of the node running the Drillbit service.
+  * user_port  
+The user port address, used between nodes in a cluster for connecting to
+external clients and for the Drill Web UI.  
+  * control_port  
+The control port address, used between nodes for multi-node installation of
+Apache Drill.
+  * data_port  
+The data port address, used between nodes for multi-node installation of
+Apache Drill.
+  * current  
+True means the Drillbit is connected to the session or client running the
+query. This Drillbit is the Foreman for the current session.  
+
+### Query the version table.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> select * from version;
+    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
+    |                 commit_id                 |                           commit_message                           |        commit_time         | build_email  |         build_time         |
+    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
+    | d8b19759657698581cc0d01d7038797952888123  | DRILL-3100: TestImpersonationDisabledWithMiniDFS fails on Windows  | 15.05.2015 @ 05:18:03 UTC  | Unknown      | 15.05.2015 @ 06:52:32 UTC  |
+    +-------------------------------------------+--------------------------------------------------------------------+----------------------------+--------------+----------------------------+
+    1 row selected (0.099 seconds)
+
+  * commit_id  
+The GitHub commit ID of the release you are running. For example, <https://github.com/apache/drill/commit/e3ab2c1760ad34bda80141e2c3108f7eda7c9104>
+  * commit_message  
+The message explaining the change.
+  * commit_time  
+The date and time of the change.
+  * build_email  
+The email address of the person who made the change, which is unknown in this
+example.
+  * build_time  
+The time that the release was built.
+
+### Query the options table.
+
+Drill provides system, session, and boot options that you can query.
+
+The following example shows a query on the system options:
+
+    0: jdbc:drill:zk=10.10.100.113:5181> select * from options where type='SYSTEM' limit 10;
+    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
+    |                      name                       |   kind   |  type   |  status  |   num_val   | string_val  | bool_val  | float_val  |
+    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
+    | drill.exec.functions.cast_empty_string_to_null  | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
+    | drill.exec.storage.file.partition.column.label  | STRING   | SYSTEM  | DEFAULT  | null        | dir         | null      | null       |
+    | exec.errors.verbose                             | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
+    | exec.java_compiler                              | STRING   | SYSTEM  | DEFAULT  | null        | DEFAULT     | null      | null       |
+    | exec.java_compiler_debug                        | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | true      | null       |
+    | exec.java_compiler_janino_maxsize               | LONG     | SYSTEM  | DEFAULT  | 262144      | null        | null      | null       |
+    | exec.max_hash_table_size                        | LONG     | SYSTEM  | DEFAULT  | 1073741824  | null        | null      | null       |
+    | exec.min_hash_table_size                        | LONG     | SYSTEM  | DEFAULT  | 65536       | null        | null      | null       |
+    | exec.queue.enable                               | BOOLEAN  | SYSTEM  | DEFAULT  | null        | null        | false     | null       |
+    | exec.queue.large                                | LONG     | SYSTEM  | DEFAULT  | 10          | null        | null      | null       |
+    +-------------------------------------------------+----------+---------+----------+-------------+-------------+-----------+------------+
+    10 rows selected (0.216 seconds)
+
+  * name  
+The name of the option.
+  * kind  
+The data type of the option value.
+  * type  
+The type of options in the output: system or session.
+  * status
+The status of the option: default or changed.
+  * num_val  
+The default value, which is of the long or int data type; otherwise, null.
+  * string_val  
+The default value, which is a string; otherwise, null.
+  * bool_val  
+The default value, which is true or false; otherwise, null.
+  * float_val  
+The default value, which is of the double, float, or long double data type;
+otherwise, null.
+
+### Query the boot table.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> select * from boot limit 10;
+    +--------------------------------------+----------+-------+---------+------------+-------------------------+-----------+------------+
+    |                 name                 |   kind   | type  | status  |  num_val   |       string_val        | bool_val  | float_val  |
+    +--------------------------------------+----------+-------+---------+------------+-------------------------+-----------+------------+
+    | awt.toolkit                          | STRING   | BOOT  | BOOT    | null       | "sun.awt.X11.XToolkit"  | null      | null       |
+    | drill.client.supports-complex-types  | BOOLEAN  | BOOT  | BOOT    | null       | null                    | true      | null       |
+    | drill.exec.buffer.size               | STRING   | BOOT  | BOOT    | null       | "6"                     | null      | null       |
+    | drill.exec.buffer.spooling.delete    | BOOLEAN  | BOOT  | BOOT    | null       | null                    | true      | null       |
+    | drill.exec.buffer.spooling.size      | LONG     | BOOT  | BOOT    | 100000000  | null                    | null      | null       |
+    | drill.exec.cluster-id                | STRING   | BOOT  | BOOT    | null       | "SKCluster"             | null      | null       |
+    | drill.exec.compile.cache_max_size    | LONG     | BOOT  | BOOT    | 1000       | null                    | null      | null       |
+    | drill.exec.compile.compiler          | STRING   | BOOT  | BOOT    | null       | "DEFAULT"               | null      | null       |
+    | drill.exec.compile.debug             | BOOLEAN  | BOOT  | BOOT    | null       | null                    | true      | null       |
+    | drill.exec.compile.janino_maxsize    | LONG     | BOOT  | BOOT    | 262144     | null                    | null      | null       |
+    +--------------------------------------+----------+-------+---------+------------+-------------------------+-----------+------------+
+    10 rows selected (0.192 seconds)
+
+  * name  
+The name of the boot option.
+  * kind  
+The data type of the option value.
+  * type  
+This is always boot.
+  * status
+This is always boot.
+  * num_val  
+The default value, which is of the long or int data type; otherwise, null.
+  * string_val  
+The default value, which is a string; otherwise, null.
+  * bool_val  
+The default value, which is true or false; otherwise, null.
+  * float_val  
+The default value, which is of the double, float, or long double data type;
+otherwise, null.
+
+### Query the threads table.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> select * from threads;
+    +--------------------+------------+----------------+---------------+
+    |       hostname     | user_port  | total_threads  | busy_threads  |
+    +--------------------+------------+----------------+---------------+
+    | qa-node115.qa.lab  | 31010      | 33             | 33            |
+    | qa-node114.qa.lab  | 31010      | 33             | 32            |
+    | qa-node116.qa.lab  | 31010      | 29             | 29            |
+    +--------------------+------------+----------------+---------------+
+    3 rows selected (0.618 seconds)
+
+  * hostname   
+The name of the node running the Drillbit service.
+  * user_port  
+The user port address, used between nodes in a cluster for connecting to
+external clients and for the Drill Web UI. 
+  * total_threads
+The peak thread count on the node.
+  * busy_threads
+The current number of live threads (daemon and non-daemon) on the node.
+
+### Query the memory table.
+
+    0: jdbc:drill:zk=10.10.100.113:5181> select * from memory;
+    +--------------------+------------+---------------+-------------+-----------------+---------------------+-------------+
+    |       hostname     | user_port  | heap_current  |  heap_max   | direct_current  | jvm_direct_current  | direct_max  |
+    +--------------------+------------+---------------+-------------+-----------------+---------------------+-------------+
+    | qa-node115.qa.lab  | 31010      | 443549712     | 4294967296  | 11798941        | 167772974           | 8589934592  |
+    | qa-node114.qa.lab  | 31010      | 149948432     | 4294967296  | 7750365         | 134218542           | 8589934592  |
+    | qa-node116.qa.lab  | 31010      | 358612992     | 4294967296  | 7750365         | 83886894            | 8589934592  |
+    +--------------------+------------+---------------+-------------+-----------------+---------------------+-------------+
+    3 rows selected (0.172 seconds)
+
+  * hostname   
+The name of the node running the Drillbit service.
+  * user_port  
+The user port address, used between nodes in a cluster for connecting to
+external clients and for the Drill Web UI.
+  * heap_current
+The amount of memory being used on the heap, in bytes.
+  * heap_max
+The maximum amount of memory available on the heap, in bytes.
+  * direct_current
+The current direct memory being used by the allocator, in bytes.
+  * jvm_direct_current
+The current JVM direct memory allocation, in bytes.
+  * direct_max
+The maximum direct memory available to the allocator, in bytes.
+
+For information about how to configure Drill system and session options, see [Planning and Execution Options]({{ site.baseurl }}/docs/planning-and-execution-options).
+
+For information about how to configure Drill start-up options, see [Start-Up Options]({{ site.baseurl }}/docs/start-up-options).
+


[07/14] drill git commit: BB added query profile tables

Posted by ts...@apache.org.
BB added query profile tables


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/8f7dcfac
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/8f7dcfac
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/8f7dcfac

Branch: refs/heads/gh-pages
Commit: 8f7dcfac254992b0687787e3cb642f960a2f8a40
Parents: f4968c3
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 14:08:43 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 14:08:43 2015 -0700

----------------------------------------------------------------------
 _data/docs.json                                 | 71 ++++++++++++++---
 .../060-query-profile-tables.md                 | 84 ++++++++++++++++++++
 2 files changed, 145 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/8f7dcfac/_data/docs.json
----------------------------------------------------------------------
diff --git a/_data/docs.json b/_data/docs.json
index 8185d14..76ac0f9 100644
--- a/_data/docs.json
+++ b/_data/docs.json
@@ -3239,8 +3239,8 @@
                         }
                     ], 
                     "children": [], 
-                    "next_title": "Log and Debug", 
-                    "next_url": "/docs/log-and-debug/", 
+                    "next_title": "Query Profile Column Descriptions", 
+                    "next_url": "/docs/query-profile-column-descriptions/", 
                     "parent": "Identifying Performance Issues", 
                     "previous_title": "Query Plans", 
                     "previous_url": "/docs/query-plans/", 
@@ -5323,8 +5323,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Log and Debug", 
-                            "next_url": "/docs/log-and-debug/", 
+                            "next_title": "Query Profile Column Descriptions", 
+                            "next_url": "/docs/query-profile-column-descriptions/", 
                             "parent": "Identifying Performance Issues", 
                             "previous_title": "Query Plans", 
                             "previous_url": "/docs/query-plans/", 
@@ -5341,6 +5341,23 @@
                     "relative_path": "_docs/performance-tuning/050-identifying-performance-issues.md", 
                     "title": "Identifying Performance Issues", 
                     "url": "/docs/identifying-performance-issues/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Performance Tuning", 
+                            "url": "/docs/performance-tuning/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Log and Debug", 
+                    "next_url": "/docs/log-and-debug/", 
+                    "parent": "Performance Tuning", 
+                    "previous_title": "Query Profiles", 
+                    "previous_url": "/docs/query-profiles/", 
+                    "relative_path": "_docs/performance-tuning/060-query-profile-tables.md", 
+                    "title": "Query Profile Column Descriptions", 
+                    "url": "/docs/query-profile-column-descriptions/"
                 }
             ], 
             "next_title": "Performance Tuning Introduction", 
@@ -6144,6 +6161,23 @@
             "title": "Query Plans and Tuning Introduction", 
             "url": "/docs/query-plans-and-tuning-introduction/"
         }, 
+        "Query Profile Column Descriptions": {
+            "breadcrumbs": [
+                {
+                    "title": "Performance Tuning", 
+                    "url": "/docs/performance-tuning/"
+                }
+            ], 
+            "children": [], 
+            "next_title": "Log and Debug", 
+            "next_url": "/docs/log-and-debug/", 
+            "parent": "Performance Tuning", 
+            "previous_title": "Query Profiles", 
+            "previous_url": "/docs/query-profiles/", 
+            "relative_path": "_docs/performance-tuning/060-query-profile-tables.md", 
+            "title": "Query Profile Column Descriptions", 
+            "url": "/docs/query-profile-column-descriptions/"
+        }, 
         "Query Profiles": {
             "breadcrumbs": [
                 {
@@ -6156,8 +6190,8 @@
                 }
             ], 
             "children": [], 
-            "next_title": "Log and Debug", 
-            "next_url": "/docs/log-and-debug/", 
+            "next_title": "Query Profile Column Descriptions", 
+            "next_url": "/docs/query-profile-column-descriptions/", 
             "parent": "Identifying Performance Issues", 
             "previous_title": "Query Plans", 
             "previous_url": "/docs/query-plans/", 
@@ -12578,8 +12612,8 @@
                                 }
                             ], 
                             "children": [], 
-                            "next_title": "Log and Debug", 
-                            "next_url": "/docs/log-and-debug/", 
+                            "next_title": "Query Profile Column Descriptions", 
+                            "next_url": "/docs/query-profile-column-descriptions/", 
                             "parent": "Identifying Performance Issues", 
                             "previous_title": "Query Plans", 
                             "previous_url": "/docs/query-plans/", 
@@ -12596,6 +12630,23 @@
                     "relative_path": "_docs/performance-tuning/050-identifying-performance-issues.md", 
                     "title": "Identifying Performance Issues", 
                     "url": "/docs/identifying-performance-issues/"
+                }, 
+                {
+                    "breadcrumbs": [
+                        {
+                            "title": "Performance Tuning", 
+                            "url": "/docs/performance-tuning/"
+                        }
+                    ], 
+                    "children": [], 
+                    "next_title": "Log and Debug", 
+                    "next_url": "/docs/log-and-debug/", 
+                    "parent": "Performance Tuning", 
+                    "previous_title": "Query Profiles", 
+                    "previous_url": "/docs/query-profiles/", 
+                    "relative_path": "_docs/performance-tuning/060-query-profile-tables.md", 
+                    "title": "Query Profile Column Descriptions", 
+                    "url": "/docs/query-profile-column-descriptions/"
                 }
             ], 
             "next_title": "Performance Tuning Introduction", 
@@ -12613,8 +12664,8 @@
             "next_title": "Query Audit Logging", 
             "next_url": "/docs/query-audit-logging/", 
             "parent": "", 
-            "previous_title": "Query Profiles", 
-            "previous_url": "/docs/query-profiles/", 
+            "previous_title": "Query Profile Column Descriptions", 
+            "previous_url": "/docs/query-profile-column-descriptions/", 
             "relative_path": "_docs/073-log-and-debug.md", 
             "title": "Log and Debug", 
             "url": "/docs/log-and-debug/"

http://git-wip-us.apache.org/repos/asf/drill/blob/8f7dcfac/_docs/performance-tuning/060-query-profile-tables.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/060-query-profile-tables.md b/_docs/performance-tuning/060-query-profile-tables.md
new file mode 100644
index 0000000..009516c
--- /dev/null
+++ b/_docs/performance-tuning/060-query-profile-tables.md
@@ -0,0 +1,84 @@
+---
+title: "Query Profile Column Descriptions"
+parent: "Performance Tuning"
+--- 
+
+The following tables provide descriptions of the columns listed in each of the tables for a query profile.  
+
+
+## Fragment Overview  Table  
+
+Shows aggregate metrics for each major fragment that executed the query.
+
+The following table lists descriptions for each column in the Fragment Overview  
+table:  
+
+| Column Name               | Description                                                                                                                                                                 |
+|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Major Fragment ID         | The coordinate ID of the major fragment. For example, 03-xx-xx where 03 is the major fragment ID followed by xx-xx, which represents the minor fragment ID and operator ID. |
+| Minor Fragments Reporting | The number of minor fragments that Drill parallelized for the major fragment.                                                                                               |
+| First Start               | The total time before the first minor fragment started its task.                                                                                                            |
+| Last Start                | The total time before the last minor fragment started its task.                                                                                                             |
+| First End                 | The total time for the first minor fragment to finish its task.                                                                                                             |
+| Last End                  | The total time for the last minor fragment to finish its task.                                                                                                              |
+| Min Runtime               | The minimum of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
+| Avg Runtime               | The average of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
+| Max Runtime               | The maximum of the total amount of time spent by minor fragments to complete their tasks.                                                                                   |
+| Last Update               | The last time one of the minor fragments sent a status update to the Foreman. Time is shown in 24-hour notation.                                                            |
+| Last Progress             | The last time one of the minor fragments made progress, such as a change in fragment state or read data from disk. Time is shown in 24-hour notation.                       |
+| Max Peak Memory           | The maximum of the peak direct memory allocated to any minor fragment.                                                                                                      |
+
+## Major Fragment Block  
+
+Shows metrics for the minor fragments that were parallelized for each major fragment.  
+
+The following table lists descriptions for each column in a major fragment block:  
+
+| Column Name       | Description                                                                                                                                                                                                        |
+|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Minor Fragment ID | The coordinate ID of the minor fragment that was parallelized from the major fragment. For example, 02-03-xx where 02 is the Major Fragment ID, 03 is the Minor Fragment ID, and xx corresponds to an operator ID. |
+| Host              | The node on which the minor fragment carried out its task.                                                                                                                                                         |
+| Start             | The amount of time passed before the minor fragment started its task.                                                                                                                                              |
+| End               | The amount of time passed before the minor fragment finished its task.                                                                                                                                             |
+| Runtime           | The duration of time for the fragment to complete a task. This value equals the difference between End and Start time.                                                                                             |
+| Max Records       | The maximum number of records consumed by an operator from a single input stream.                                                                                                                                  |
+| Max Batches       | The maximum number of input batches across input streams, operators, and minor fragments.                                                                                                                          |
+| Last Update       | The last time this fragment sent a status update to the Foreman. Time is shown in 24-hour notation.                                                                                                                |
+| Last Progress     | The last time this fragment made progress, such as a change in fragment state or reading data from disk. Time is shown in 24-hour notation.                                                                        |
+| Peak Memory       | The peak direct memory allocated during execution for this minor fragment.                                                                                                                                         |
+| State             | The status of the minor fragment; either finished, running, cancelled, or failed.                                                                                                                                  |
+
+
+## Operator Overview  Table  
+
+Shows aggregate metrics for each operator within a major fragment that performed relational operations during query execution.
+ 
+The following table lists descriptions for each column in the Operator Overview table:
+
+| Column Name                                          | Description                                                                                                                                                                                                                   |
+|------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Operator ID                                          | The coordinates of an operator that performed an operation during a particular phase of the query. For example, 02-xx-03 where 02 is the Major Fragment ID, xx corresponds to a Minor Fragment ID, and 03 is the Operator ID. |
+| Type                                                 | The operator type. Operators can be of type project, filter, hash join, single sender, or unordered receiver.                                                                                                                 |
+| Min Setup Time, Avg Setup Time, Max Setup Time       | The minimum, average, and maximum amount of time spent by the operator to set up before performing the operation.                                                                                                             |
+| Min Process Time, Avg Process Time, Max Process Time | The minimum, average, and maximum  amount of time spent by the operator to perform the operation.                                                                                                                             |
+| Wait (min, avg, max)                                 | These fields represent the minimum, average,  and maximum cumulative times spent by operators waiting for external resources.                                                                                                 |
+| Avg Peak Memory                                      | Represents the average of the peak direct memory allocated across minor fragments. Relates to the memory needed by operators to perform their operations, such as hash join or sort.                                          |
+| Max Peak Memory                                      | Represents the maximum of the peak direct memory allocated across minor fragments. Relates to the memory needed by operators to perform their operations, such as  hash join or sort.                                         |  
+
+## Operator Block  
+
+Shows time and memory metrics for each operator type within a major fragment.  
+
+The following table provides descriptions for each column presented in the operator block:  
+
+| Column Name    | Description                                                                                                                                                                                              |
+|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Minor Fragment | The coordinate ID of the minor fragment on which the operator ran. For example, 04-03-01 where 04 is the Major Fragment ID, 03 is the Minor Fragment ID, and 01 is the Operator ID.                      |
+| Setup Time     | The amount of time spent by the operator to set up before performing its operation. This includes run-time code generation and opening a file.                                                           |
+| Process Time   | The amount of time spent by the operator to perform its operation.                                                                                                                                       |
+| Wait Time      | The cumulative amount of time spent by an operator waiting for external resources, such as waiting to send records, waiting to receive records, waiting to write to disk, and waiting to read from disk. |
+| Max Batches    | The maximum number of record batches consumed from a single input stream.                                                                                                                                |
+| Max Records    | The maximum number of records consumed from a single input stream.                                                                                                                                       |
+| Peak Memory    | Represents the peak direct memory allocated. Relates to the memory needed by the operators to perform their operations, such as  hash join and sort.                                                     |  
+
+


[14/14] drill git commit: escape chars hiding bracketed info > back ticks

Posted by ts...@apache.org.
escape chars hiding bracketed info > back ticks


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/0120d252
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/0120d252
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/0120d252

Branch: refs/heads/gh-pages
Commit: 0120d252a3ec1e8edbb9e01fc7f61c4dc8cae5b5
Parents: 7fb98c5
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 22:58:27 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 22:58:27 2015 -0700

----------------------------------------------------------------------
 .../identifying-performance-issues/010-query-plans.md        | 8 ++++----
 .../identifying-performance-issues/020-query-profiles.md     | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/0120d252/_docs/performance-tuning/identifying-performance-issues/010-query-plans.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/identifying-performance-issues/010-query-plans.md b/_docs/performance-tuning/identifying-performance-issues/010-query-plans.md
index 7332322..b08435b 100755
--- a/_docs/performance-tuning/identifying-performance-issues/010-query-plans.md
+++ b/_docs/performance-tuning/identifying-performance-issues/010-query-plans.md
@@ -8,7 +8,7 @@ If you experience performance issues in Drill, you can typically identify the so
 
 Drill has an optimizer and a parallelizer that work together to plan a query. Drill creates logical, physical, and execution plans based on the available statistics for an associated set of files or data sources. The number of running Drill nodes and configured runtime settings contribute to how Drill plans and executes a query.
  
-You can use [EXPLAIN commands]({{ site.baseurl }}/docs/explain-commands/) to view the logical and physical plans for a query, however you cannot view the execution plan. To see how Drill executed a query, you can view the query profile in the Drill Web UI at <drill_node_ip_address>:8047.
+You can use [EXPLAIN commands]({{ site.baseurl }}/docs/explain-commands/) to view the logical and physical plans for a query, however you cannot view the execution plan. To see how Drill executed a query, you can view the query profile in the Drill Web UI at `<drill_node_ip_address>:8047`.
 
 ### Logical Plan  
 
@@ -32,7 +32,7 @@ The physical plan shows the major fragments and specific operators with correlat
  
 The physical plan displays the IDs in the following format:
  
-<MajorFragmentID\> - <OperatorID\>
+`<MajorFragmentID> - <OperatorID>`
  
 For example, 00-02 where 00 is the MajorFragmentID and 02 is the OperatorID.
  
@@ -64,9 +64,9 @@ You can test the performance of a physical plan that Drill generates, modify the
  
 To modify and re-submit a physical plan to Drill, complete the following steps:  
 
-1. Run EXPLAIN PLAN FOR <query\> to see the physical plan for your query.  
+1. Run EXPLAIN PLAN FOR `<query>` to see the physical plan for your query.  
 2. Copy the JSON output of the physical plan, and modify as needed.  
-3. Navigate to the Drill Web UI at <drill\_node\_ip_address\>:8047.  
+3. Navigate to the Drill Web UI at `<drill_node_ip_address>:8047`.  
 4. Select **Query** in the menu bar.  
 ![]({{ site.baseurl }}/docs/img/submit_plan.png)  
 

http://git-wip-us.apache.org/repos/asf/drill/blob/0120d252/_docs/performance-tuning/identifying-performance-issues/020-query-profiles.md
----------------------------------------------------------------------
diff --git a/_docs/performance-tuning/identifying-performance-issues/020-query-profiles.md b/_docs/performance-tuning/identifying-performance-issues/020-query-profiles.md
index a4908f7..badf486 100755
--- a/_docs/performance-tuning/identifying-performance-issues/020-query-profiles.md
+++ b/_docs/performance-tuning/identifying-performance-issues/020-query-profiles.md
@@ -5,7 +5,7 @@ parent: "Identifying Performance Issues"
 
 A profile is a summary of metrics collected for each query that Drill executes. Query profiles provide information that you can use to monitor and analyze query performance. Drill creates a query profile from major, minor, operator, and input stream profiles. Each major fragment profile consists of a list of minor fragment profiles. Each minor fragment profile consists of a list of operator profiles. An operator profile consists of a list of input stream profiles. 
 
-You can view aggregate statistics across profile lists in the Profile tab of the Drill Web UI at <drill\_node\_ip_address\>:8047. You can modify and resubmit queries, or cancel queries. For debugging purposes, you can use profiles in conjunction with Drill logs. See Log and Debug.
+You can view aggregate statistics across profile lists in the Profile tab of the Drill Web UI at `<drill_node_ip_address>:8047`. You can modify and resubmit queries, or cancel queries. For debugging purposes, you can use profiles in conjunction with Drill logs. See Log and Debug.
  
 Metrics in a query profile are associated with a coordinate system of IDs. Drill uses a coordinate system comprised of query, fragment, and operator identifiers to track query execution activities and resources. Drill assigns a unique QueryID to each query received and then assigns IDs to each fragment and operator that executes the query.
  
@@ -19,7 +19,7 @@ Fragment and operator IDs:
 
 ## Viewing a Query Profile  
 
-When you select the Profiles tab in the Drill Web UI at <drill\_node_ip\_address\>:8047, you see a list of the last 100 queries than have run or that are currently running in the cluster.  
+When you select the Profiles tab in the Drill Web UI at `<drill_node_ip_address>:8047`, you see a list of the last 100 queries that have run or that are currently running in the cluster.  
 
 ![]({{ site.baseurl }}/docs/img/list_queries.png)
 
@@ -124,7 +124,7 @@ You may want to cancel a query if it hangs or causes performance bottlenecks. Yo
  
 To cancel a query from the Drill Web UI, complete the following steps:  
 
-1. Navigate to the Drill Web UI at <drill\_node_ip\_address\>:8047.
+1. Navigate to the Drill Web UI at `<drill_node_ip_address>:8047`.
 The Drill node from which you access the Drill Web UI must have an active Drillbit running.
 2. Select Profiles in the toolbar.
 A list of running and completed queries appears.


[04/14] drill git commit: edit release notes

Posted by ts...@apache.org.
edit release notes


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/39a2b6bf
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/39a2b6bf
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/39a2b6bf

Branch: refs/heads/gh-pages
Commit: 39a2b6bf4ecacb8ef8bc123ee51a1e26fe22a336
Parents: cdb566c
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 12:01:03 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 12:01:03 2015 -0700

----------------------------------------------------------------------
 _docs/rn/080-1.0.0-rn.md | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/39a2b6bf/_docs/rn/080-1.0.0-rn.md
----------------------------------------------------------------------
diff --git a/_docs/rn/080-1.0.0-rn.md b/_docs/rn/080-1.0.0-rn.md
index 5755568..73fd453 100755
--- a/_docs/rn/080-1.0.0-rn.md
+++ b/_docs/rn/080-1.0.0-rn.md
@@ -3,8 +3,6 @@ title: "Apache Drill 1.0.0 Release Notes"
 parent: "Release Notes"
 ---
  Today we're happy to announce the availability of Drill 1.0.0, providing additional enhancements and bug fixes. This release includes the following new features, enhancements, and bug fixes:
-
-        Release Notes - Apache Drill - Version 1.0.0
     
 <h2>        Sub-task
 </h2>


[13/14] drill git commit: remove incomplete examples

Posted by ts...@apache.org.
remove incomplete examples


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7fb98c54
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7fb98c54
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7fb98c54

Branch: refs/heads/gh-pages
Commit: 7fb98c54cd75c9822f265a1f107a2514712d2893
Parents: a37c347
Author: Kristine Hahn <kh...@maprtech.com>
Authored: Tue May 19 21:38:55 2015 -0700
Committer: Kristine Hahn <kh...@maprtech.com>
Committed: Tue May 19 21:38:55 2015 -0700

----------------------------------------------------------------------
 .../035-plugin-configuration-introduction.md    | 60 --------------------
 1 file changed, 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/7fb98c54/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
----------------------------------------------------------------------
diff --git a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
index 1ec6461..ccc68d9 100644
--- a/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
+++ b/_docs/connect-a-data-source/035-plugin-configuration-introduction.md
@@ -164,66 +164,6 @@ The following example of a storage plugin for reading CSV files with the new tex
       "delimiter": "," 
     },  
 
-### How Formats Affect Output
-
-The following self-explanatory examples show how the output of queries look using different formats:
-
-    SELECT * FROM dfs.`/tmp/csv_no_header.csv`;
-    +------------------------+
-    |        columns         |
-    +------------------------+
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    +------------------------+
-    7 rows selected (0.112 seconds)
-
-    SELECT * FROM dfs.`/tmp/csv_with_comments.csv2`;
-    +------------------------+
-    |        columns         |
-    +------------------------+
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    +------------------------+
-    7 rows selected (0.111 seconds)
-
-    SELECT * FROM dfs.`/tmp/csv_with_escape.csv`;
-    +------------------------------------------------------------------------+
-    |                                columns                                 |
-    +------------------------------------------------------------------------+
-    | ["hello","1","2","3 \" double quote is the default escape character"]  |
-    | ["hello","1","2","3"]                                                  |
-    | ["hello","1","2","3"]                                                  |
-    | ["hello","1","2","3"]                                                  |
-    | ["hello","1","2","3"]                                                  |
-    | ["hello","1","2","3"]                                                  |
-    | ["hello","1","2","3"]                                                  |
-    +------------------------------------------------------------------------+
-    7 rows selected (0.104 seconds)
-
-    SELECT * FROM dfs.`/tmp/csv_with_header.csv2`;
-    +------------------------+
-    |        columns         |
-    +------------------------+
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    | ["hello","1","2","3"]  |
-    +------------------------+
-
-
 ## Using Other Attributes
 
 The configuration of other attributes, such as `size.calculator.enabled` in the hbase plugin and `configProps` in the hive plugin, are implementation-dependent and beyond the scope of this document.