Posted to commits@impala.apache.org by jo...@apache.org on 2018/05/10 21:48:57 UTC

[5/8] impala git commit: IMPALA-6999: Upgrade to sqlparse-0.1.19 for Impala shell

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.14/tests/test_tokenize.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.14/tests/test_tokenize.py b/shell/ext-py/sqlparse-0.1.14/tests/test_tokenize.py
deleted file mode 100644
index 0b23fa8..0000000
--- a/shell/ext-py/sqlparse-0.1.14/tests/test_tokenize.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import sys
-import types
-import unittest
-
-import pytest
-
-import sqlparse
-from sqlparse import lexer
-from sqlparse import sql
-from sqlparse.tokens import *
-
-
-class TestTokenize(unittest.TestCase):
-
-    def test_simple(self):
-        s = 'select * from foo;'
-        stream = lexer.tokenize(s)
-        self.assert_(isinstance(stream, types.GeneratorType))
-        tokens = list(stream)
-        self.assertEqual(len(tokens), 8)
-        self.assertEqual(len(tokens[0]), 2)
-        self.assertEqual(tokens[0], (Keyword.DML, u'select'))
-        self.assertEqual(tokens[-1], (Punctuation, u';'))
-
-    def test_backticks(self):
-        s = '`foo`.`bar`'
-        tokens = list(lexer.tokenize(s))
-        self.assertEqual(len(tokens), 3)
-        self.assertEqual(tokens[0], (Name, u'`foo`'))
-
-    def test_linebreaks(self):  # issue1
-        s = 'foo\nbar\n'
-        tokens = lexer.tokenize(s)
-        self.assertEqual(''.join(str(x[1]) for x in tokens), s)
-        s = 'foo\rbar\r'
-        tokens = lexer.tokenize(s)
-        self.assertEqual(''.join(str(x[1]) for x in tokens), s)
-        s = 'foo\r\nbar\r\n'
-        tokens = lexer.tokenize(s)
-        self.assertEqual(''.join(str(x[1]) for x in tokens), s)
-        s = 'foo\r\nbar\n'
-        tokens = lexer.tokenize(s)
-        self.assertEqual(''.join(str(x[1]) for x in tokens), s)
-
-    def test_inline_keywords(self):  # issue 7
-        s = "create created_foo"
-        tokens = list(lexer.tokenize(s))
-        self.assertEqual(len(tokens), 3)
-        self.assertEqual(tokens[0][0], Keyword.DDL)
-        self.assertEqual(tokens[2][0], Name)
-        self.assertEqual(tokens[2][1], u'created_foo')
-        s = "enddate"
-        tokens = list(lexer.tokenize(s))
-        self.assertEqual(len(tokens), 1)
-        self.assertEqual(tokens[0][0], Name)
-        s = "join_col"
-        tokens = list(lexer.tokenize(s))
-        self.assertEqual(len(tokens), 1)
-        self.assertEqual(tokens[0][0], Name)
-        s = "left join_col"
-        tokens = list(lexer.tokenize(s))
-        self.assertEqual(len(tokens), 3)
-        self.assertEqual(tokens[2][0], Name)
-        self.assertEqual(tokens[2][1], 'join_col')
-
-    def test_negative_numbers(self):
-        s = "values(-1)"
-        tokens = list(lexer.tokenize(s))
-        self.assertEqual(len(tokens), 4)
-        self.assertEqual(tokens[2][0], Number.Integer)
-        self.assertEqual(tokens[2][1], '-1')
-
-    # Somehow this test fails on Python 3.2
-    @pytest.mark.skipif('sys.version_info >= (3,0)')
-    def test_tab_expansion(self):
-        s = "\t"
-        lex = lexer.Lexer()
-        lex.tabsize = 5
-        tokens = list(lex.get_tokens(s))
-        self.assertEqual(tokens[0][1], " " * 5)
-
-
-class TestToken(unittest.TestCase):
-
-    def test_str(self):
-        token = sql.Token(None, 'FoO')
-        self.assertEqual(str(token), 'FoO')
-
-    def test_repr(self):
-        token = sql.Token(Keyword, 'foo')
-        tst = "<Keyword 'foo' at 0x"
-        self.assertEqual(repr(token)[:len(tst)], tst)
-        token = sql.Token(Keyword, '1234567890')
-        tst = "<Keyword '123456...' at 0x"
-        self.assertEqual(repr(token)[:len(tst)], tst)
-
-    def test_flatten(self):
-        token = sql.Token(Keyword, 'foo')
-        gen = token.flatten()
-        self.assertEqual(type(gen), types.GeneratorType)
-        lgen = list(gen)
-        self.assertEqual(lgen, [token])
-
-
-class TestTokenList(unittest.TestCase):
-
-    def test_repr(self):
-        p = sqlparse.parse('foo, bar, baz')[0]
-        tst = "<IdentifierList 'foo, b...' at 0x"
-        self.assertEqual(repr(p.tokens[0])[:len(tst)], tst)
-
-    def test_token_first(self):
-        p = sqlparse.parse(' select foo')[0]
-        first = p.token_first()
-        self.assertEqual(first.value, 'select')
-        self.assertEqual(p.token_first(ignore_whitespace=False).value, ' ')
-        self.assertEqual(sql.TokenList([]).token_first(), None)
-
-    def test_token_matching(self):
-        t1 = sql.Token(Keyword, 'foo')
-        t2 = sql.Token(Punctuation, ',')
-        x = sql.TokenList([t1, t2])
-        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
-                         t1)
-        self.assertEqual(x.token_matching(0,
-                                          [lambda t: t.ttype is Punctuation]),
-                         t2)
-        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
-                         None)
-
-
-class TestStream(unittest.TestCase):
-    def test_simple(self):
-        from cStringIO import StringIO
-
-        stream = StringIO("SELECT 1; SELECT 2;")
-        lex = lexer.Lexer()
-
-        tokens = lex.get_tokens(stream)
-        self.assertEqual(len(list(tokens)), 9)
-
-        stream.seek(0)
-        lex.bufsize = 4
-        tokens = list(lex.get_tokens(stream))
-        self.assertEqual(len(tokens), 9)
-
-        stream.seek(0)
-        lex.bufsize = len(stream.getvalue())
-        tokens = list(lex.get_tokens(stream))
-        self.assertEqual(len(tokens), 9)
-
-    def test_error(self):
-        from cStringIO import StringIO
-
-        stream = StringIO("FOOBAR{")
-
-        lex = lexer.Lexer()
-        lex.bufsize = 4
-        tokens = list(lex.get_tokens(stream))
-        self.assertEqual(len(tokens), 2)
-        self.assertEqual(tokens[1][0], Error)
-
-
-@pytest.mark.parametrize('expr', ['JOIN', 'LEFT JOIN', 'LEFT OUTER JOIN',
-                                  'FULL OUTER JOIN', 'NATURAL JOIN',
-                                  'CROSS JOIN', 'STRAIGHT JOIN',
-                                  'INNER JOIN', 'LEFT INNER JOIN'])
-def test_parse_join(expr):
-    p = sqlparse.parse('%s foo' % expr)[0]
-    assert len(p.tokens) == 3
-    assert p.tokens[0].ttype is Keyword
-
-
-def test_parse_endifloop():
-    p = sqlparse.parse('END IF')[0]
-    assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword
-    p = sqlparse.parse('END   IF')[0]
-    assert len(p.tokens) == 1
-    p = sqlparse.parse('END\t\nIF')[0]
-    assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword
-    p = sqlparse.parse('END LOOP')[0]
-    assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword
-    p = sqlparse.parse('END  LOOP')[0]
-    assert len(p.tokens) == 1
-    assert p.tokens[0].ttype is Keyword

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.14/tests/utils.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.14/tests/utils.py b/shell/ext-py/sqlparse-0.1.14/tests/utils.py
deleted file mode 100644
index 9eb46bf..0000000
--- a/shell/ext-py/sqlparse-0.1.14/tests/utils.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""Helpers for testing."""
-
-import codecs
-import difflib
-import os
-import unittest
-from StringIO import StringIO
-
-import sqlparse.utils
-
-NL = '\n'
-DIR_PATH = os.path.abspath(os.path.dirname(__file__))
-PARENT_DIR = os.path.dirname(DIR_PATH)
-FILES_DIR = os.path.join(DIR_PATH, 'files')
-
-
-def load_file(filename, encoding='utf-8'):
-    """Opens filename with encoding and return it's contents."""
-    f = codecs.open(os.path.join(FILES_DIR, filename), 'r', encoding)
-    data = f.read()
-    f.close()
-    return data
-
-
-class TestCaseBase(unittest.TestCase):
-    """Base class for test cases with some additional checks."""
-
-    # Adopted from Python's tests.
-    def ndiffAssertEqual(self, first, second):
-        """Like failUnlessEqual except use ndiff for readable output."""
-        if first != second:
-            sfirst = unicode(first)
-            ssecond = unicode(second)
-            # Using the built-in .splitlines() method here will cause incorrect
-            # results when splitting statements that have quoted CR/CR+LF
-            # characters.
-            sfirst = sqlparse.utils.split_unquoted_newlines(sfirst)
-            ssecond = sqlparse.utils.split_unquoted_newlines(ssecond)
-            diff = difflib.ndiff(sfirst, ssecond)
-            fp = StringIO()
-            fp.write(NL)
-            fp.write(NL.join(diff))
-            print fp.getvalue()
-            raise self.failureException, fp.getvalue()

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.14/tox.ini
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.14/tox.ini b/shell/ext-py/sqlparse-0.1.14/tox.ini
deleted file mode 100644
index e797ca9..0000000
--- a/shell/ext-py/sqlparse-0.1.14/tox.ini
+++ /dev/null
@@ -1,37 +0,0 @@
-[tox]
-envlist=py26,py27,py32,py33,py34,pypy
-
-[testenv]
-deps=
-  pytest
-  pytest-cov
-commands=
-  sqlformat --version  # Sanity check.
-  py.test --cov=sqlparse/ tests
-
-[testenv:py32]
-changedir={envdir}
-commands=
-  sqlformat --version  # Sanity check.
-  rm -rf tests/
-  cp -r {toxinidir}/tests/ tests/
-  2to3 -w --no-diffs -n tests/
-  py.test --cov={envdir}/lib/python3.2/site-packages/sqlparse/ tests
-
-[testenv:py33]
-changedir={envdir}
-commands=
-  sqlformat --version  # Sanity check.
-  rm -rf tests/
-  cp -r {toxinidir}/tests/ tests/
-  2to3 -w --no-diffs -n tests/
-  py.test --cov={envdir}/lib/python3.3/site-packages/sqlparse/ tests
-
-[testenv:py34]
-changedir={envdir}
-commands=
-  sqlformat --version  # Sanity check.
-  rm -rf tests/
-  cp -r {toxinidir}/tests/ tests/
-  2to3 -w --no-diffs -n tests/
-  py.test --cov={envdir}/lib/python3.4/site-packages/sqlparse/ tests

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/.travis.yml
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/.travis.yml b/shell/ext-py/sqlparse-0.1.19/.travis.yml
new file mode 100644
index 0000000..313afb8
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/.travis.yml
@@ -0,0 +1,14 @@
+language: python
+python: 2.7
+env:
+  - TOX_ENV=py26
+  - TOX_ENV=py27
+  - TOX_ENV=py33
+  - TOX_ENV=py34
+  - TOX_ENV=pypy
+before_install:
+  - sudo apt-get install pypy
+install:
+  - pip install tox
+script:
+  - tox -e $TOX_ENV

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/AUTHORS
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/AUTHORS b/shell/ext-py/sqlparse-0.1.19/AUTHORS
new file mode 100644
index 0000000..78052ff
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/AUTHORS
@@ -0,0 +1,31 @@
+python-sqlparse is written and maintained by Andi Albrecht <al...@gmail.com>.
+
+This module contains code (namely the lexer and filter mechanism) from
+the pygments project that was written by Georg Brandl.
+
+Alphabetical list of contributors:
+* Alexander Beedie <ay...@gmail.com>
+* Alexey Malyshev <no...@gmail.com>
+* casey <ca...@cloudera.com>
+* Cristian Orellana <cr...@groupon.com>
+* Darik Gamble <da...@gmail.com>
+* Florian Bauer <fl...@zmdi.com>
+* Gavin Wahl <gw...@fusionbox.com>
+* JacekPliszka <Ja...@gmail.com>
+* Jesús Leganés Combarro "Piranna" <pi...@gmail.com>
+* Kevin Jing Qiu <ke...@gmail.com>
+* Michael Schuller <ch...@mschuller.net>
+* Mike Amy <co...@googlemail.com>
+* mulos <da...@gmail.com>
+* Piet Delport <pj...@gmail.com>
+* Prudhvi Vatala <pv...@gmail.com>
+* quest <qu...@wonky.windwards.net>
+* Robert Nix <co...@rnix.org>
+* Rocky Meza <rm...@fusionbox.com>
+* Ryan Wooden <ry...@gmail.com>
+* spigwitmer <it...@gmail.com>
+* Tim Graham <ti...@gmail.com>
+* Victor Hahn <in...@victor-hahn.de>
+* vthriller <fa...@yandex.ru>
+* wayne.wuw <wa...@alibaba-inc.com>
+* Yago Riveiro <ya...@gmail.com>

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/CHANGES
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/CHANGES b/shell/ext-py/sqlparse-0.1.19/CHANGES
new file mode 100644
index 0000000..267f2d0
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/CHANGES
@@ -0,0 +1,302 @@
+Release 0.1.19 (Mar 07, 2016)
+-----------------------------
+
+Bug Fixes
+* Fix IndexError when statement contains WITH clauses (issue205).
+
+
+Release 0.1.18 (Oct 25, 2015)
+-----------------------------
+
+Bug Fixes
+* Remove universal wheel support, added in 0.1.17 by mistake.
+
+
+Release 0.1.17 (Oct 24, 2015)
+-----------------------------
+
+Enhancements
+* Speed up parsing of large SQL statements (pull request: issue201, fixes the
+  following issues: issue199, issue135, issue62, issue41, by Ryan Wooden).
+
+Bug Fixes
+* Fix another splitter bug regarding DECLARE (issue194).
+
+Misc
+* Packages on PyPI are signed from now on.
+
+
+Release 0.1.16 (Jul 26, 2015)
+-----------------------------
+
+Bug Fixes
+* Fix a regression in get_alias() introduced in 0.1.15 (issue185).
+* Fix a bug in the splitter regarding DECLARE (issue193).
+* sqlformat command line tool doesn't duplicate newlines anymore (issue191).
+* Don't mix up MySQL comments starting with hash and MSSQL
+  temp tables (issue192).
+* Statement.get_type() now ignores comments at the beginning of
+  a statement (issue186).
+
+
+Release 0.1.15 (Apr 15, 2015)
+-----------------------------
+
+Bug Fixes
+* Fix a regression for identifiers with square bracket
+  notation (issue153, by darikg).
+* Add missing SQL types (issue154, issue155, issue156, by jukebox).
+* Fix parsing of multi-line comments (issue172, by JacekPliszka).
+* Fix parsing of escaped backslashes (issue174, by caseyching).
+* Fix parsing of identifiers starting with underscore (issue175).
+* Fix misinterpretation of IN keyword (issue183).
+
+Enhancements
+* Improve formatting of HAVING statements.
+* Improve parsing of inline comments (issue163).
+* Group comments to parent object (issue128, issue160).
+* Add double precision builtin (issue169, by darikg).
+* Add support for square bracket array indexing (issue170, issue176,
+  issue177 by darikg).
+* Improve grouping of aliased elements (issue167, by darikg).
+* Support comments starting with '#' character (issue178).
+
+
+Release 0.1.14 (Nov 30, 2014)
+-----------------------------
+
+Bug Fixes
+* Floats in UPDATE statements are now handled correctly (issue145).
+* Properly handle string literals in comparisons (issue148, change proposed
+  by aadis).
+* Fix indentation when using tabs (issue146).
+
+Enhancements
+* Improved formatting in list when newlines precede commas (issue140).
+
+
+Release 0.1.13 (Oct 09, 2014)
+-----------------------------
+
+Bug Fixes
+* Fix a regression in handling of NULL keywords introduced in 0.1.12.
+
+
+Release 0.1.12 (Sep 20, 2014)
+-----------------------------
+
+Bug Fixes
+* Fix handling of NULL keywords in aliased identifiers.
+* Fix SerializerUnicode to split unquoted newlines (issue131, by Michael Schuller).
+* Fix handling of modulo operators without spaces (by gavinwahl).
+
+Enhancements
+* Improve parsing of identifier lists containing placeholders.
+* Speed up query parsing of unquoted lines (by Michael Schuller).
+
+
+Release 0.1.11 (Feb 07, 2014)
+-----------------------------
+
+Bug Fixes
+* Fix incorrect parsing of string literals containing line breaks (issue118).
+* Fix typo in keywords, add MERGE, COLLECT keywords (issue122/124,
+  by Cristian Orellana).
+* Improve parsing of string literals in columns.
+* Fix parsing and formatting of statements containing EXCEPT keyword.
+* Fix Function.get_parameters() (issue126/127, by spigwitmer).
+
+Enhancements
+* Classify DML keywords (issue116, by Victor Hahn).
+* Add missing FOREACH keyword.
+* Grouping of BEGIN/END blocks.
+
+Other
+* Python 2.5 isn't automatically tested anymore, neither Travis nor Tox
+  still support it out of the box.
+
+
+Release 0.1.10 (Nov 02, 2013)
+-----------------------------
+
+Bug Fixes
+* Removed buffered reading again; it obviously causes wrong parsing in some rare
+  cases (issue114).
+* Fix regression in setup.py introduced 10 months ago (issue115).
+
+Enhancements
+* Improved support for JOINs, by Alexander Beedie.
+
+
+Release 0.1.9 (Sep 28, 2013)
+----------------------------
+
+Bug Fixes
+* Fix a regression introduced in 0.1.5 where sqlparse didn't properly
+  distinguish between single and double quoted strings when tagging
+  identifiers (issue111).
+
+Enhancements
+* New option to truncate long string literals when formatting.
+* Scientific numbers are parsed correctly (issue107).
+* Support for arithmetic expressions (issue109, issue106; by prudhvi).
+
+
+Release 0.1.8 (Jun 29, 2013)
+----------------------------
+
+Bug Fixes
+* Whitespaces within certain keywords are now allowed (issue97, patch proposed
+  by xcombelle).
+
+Enhancements
+* Improve parsing of assignments in UPDATE statements (issue90).
+* Add STRAIGHT_JOIN statement (by Yago Riveiro).
+* Function.get_parameters() now returns the parameter if only one parameter is
+  given (issue94, by wayne.wuw).
+* sqlparse.split() now removes leading and trailing whitespace from split
+  statements.
+* Add USE as keyword token (by mulos).
+* Improve parsing of PEP249-style placeholders (issue103).
+
+
+Release 0.1.7 (Apr 06, 2013)
+----------------------------
+
+Bug Fixes
+ * Fix Python 3 compatibility of sqlformat script (by Piet Delport).
+ * Fix parsing of SQL statements that contain binary data (by Alexey
+   Malyshev).
+ * Fix a bug where keywords were identified as aliased identifiers in
+   invalid SQL statements.
+ * Fix parsing of identifier lists where identifiers are keywords too
+   (issue10).
+
+Enhancements
+ * Top-level API functions now accept encoding keyword to parse
+   statements in certain encodings more reliably (issue20).
+ * Improve parsing speed when SQL contains CLOBs or BLOBs (issue86).
+ * Improve formatting of ORDER BY clauses (issue89).
+ * Formatter now tries to detect runaway indentations caused by
+   parsing errors or invalid SQL statements. When re-indenting such
+   statements the formatter flips back to column 0 before going crazy.
+
+Other
+ * Documentation updates.
+
+
+Release 0.1.6 (Jan 01, 2013)
+----------------------------
+
+sqlparse is now compatible with Python 3 without any patches. The
+Python 3 version is generated during install by 2to3. You'll need
+distribute to install sqlparse for Python 3.
+
+Bug Fixes
+ * Fix parsing error with dollar-quoted procedure bodies (issue83).
+
+Other
+ * Documentation updates.
+ * Test suite now uses tox and py.test.
+ * py3k fixes (by vthriller).
+ * py3k fixes in setup.py (by Florian Bauer).
+ * setup.py now requires distribute (by Florian Bauer).
+
+
+Release 0.1.5 (Nov 13, 2012)
+----------------------------
+
+Bug Fixes
+ * Improve handling of quoted identifiers (issue78).
+ * Improve grouping and formatting of identifiers with operators (issue53).
+ * Improve grouping and formatting of concatenated strings (issue53).
+ * Improve handling of varchar() (by Mike Amy).
+ * Clean up handling of various SQL elements.
+ * Switch to py.test and clean up tests.
+ * Several minor fixes.
+
+Other
+ * Deprecate sqlparse.SQLParseError. Please use
+   sqlparse.exceptions.SQLParseError instead.
+ * Add caching to speed up processing.
+ * Add experimental filters for token processing.
+ * Add sqlformat.parsestream (by quest).
+
+
+Release 0.1.4 (Apr 20, 2012)
+----------------------------
+
+Bug Fixes
+ * Avoid "stair case" effects when identifiers, functions,
+   placeholders or keywords are mixed in identifier lists (issue45,
+   issue49, issue52) and when asterisks are used as operators
+   (issue58).
+ * Make keyword detection more strict (issue47).
+ * Improve handling of CASE statements (issue46).
+ * Fix statement splitting when parsing recursive statements (issue57,
+   thanks to piranna).
+ * Fix for negative numbers (issue56, thanks to kevinjqiu).
+ * Pretty format comments in identifier lists (issue59).
+ * Several minor bug fixes and improvements.
+
+
+Release 0.1.3 (Jul 29, 2011)
+----------------------------
+
+Bug Fixes
+ * Improve parsing of floats (thanks to Kris).
+ * When formatting a statement a space before LIMIT was removed (issue35).
+ * Fix strip_comments flag (issue38, reported by ooberm...@gmail.com).
+ * Avoid parsing names as keywords (issue39, reported by djo...@taket.org).
+ * Make sure identifier lists in subselects are grouped (issue40,
+   reported by djo...@taket.org).
+ * Split statements with IF as functions correctly (issue33 and
+   issue29, reported by charles....@unige.ch).
+ * Relax detection of keywords, esp. when used as function names
+   (issue36, nyuhu...@gmail.com).
+ * Don't treat single characters as keywords (issue32).
+ * Improve parsing of stand-alone comments (issue26).
+ * Detection of placeholders in parameterized queries (issue22,
+   reported by Glyph Lefkowitz).
+ * Add parsing of MS Access column names with braces (issue27,
+   reported by frankz...@gmail.com).
+
+Other
+ * Replace Django by Flask in App Engine frontend (issue11).
+
+
+Release 0.1.2 (Nov 23, 2010)
+----------------------------
+
+Bug Fixes
+ * Fixed incorrect detection of keyword fragments embedded in names (issue7,
+   reported and initial patch by andyboyko).
+ * Stricter detection of identifier aliases (issue8, reported by estama).
+ * WHERE grouping consumed closing parenthesis (issue9, reported by estama).
+ * Fixed an issue with trailing whitespaces (reported by Kris).
+ * Better detection of escaped single quotes (issue13, reported by
+   Martin Brochhaus, patch by bluemaro with test case by Dan Carley).
+ * Ignore identifier in double-quotes when changing cases (issue 21).
+ * Lots of minor fixes targeting encoding, indentation, statement
+   parsing and more (issues 12, 14, 15, 16, 18, 19).
+ * Code cleanup with a pinch of refactoring.
+
+
+Release 0.1.1 (May 6, 2009)
+---------------------------
+
+Bug Fixes
+ * Lexer preserves original line breaks (issue1).
+ * Improved identifier parsing: backtick quotes, wildcards, T-SQL variables
+   prefixed with @.
+ * Improved parsing of identifier lists (issue2).
+ * Recursive recognition of AS (issue4) and CASE.
+ * Improved support for UPDATE statements.
+
+Other
+ * Code cleanup and better test coverage.
+
+
+Release 0.1.0 (Apr 8, 2009)
+---------------------------
+ * Initial release.

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/COPYING
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/COPYING b/shell/ext-py/sqlparse-0.1.19/COPYING
new file mode 100644
index 0000000..7b158da
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/COPYING
@@ -0,0 +1,25 @@
+Copyright (c) 2009, Andi Albrecht <al...@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+    * Neither the name of the authors nor the names of its contributors may be
+      used to endorse or promote products derived from this software without
+      specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/MANIFEST.in
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/MANIFEST.in b/shell/ext-py/sqlparse-0.1.19/MANIFEST.in
new file mode 100644
index 0000000..e546b1e
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/MANIFEST.in
@@ -0,0 +1,11 @@
+recursive-include docs source/*
+include docs/sqlformat.1
+include docs/Makefile
+recursive-include tests *.py *.sql
+include COPYING
+include TODO
+include AUTHORS
+include CHANGES
+include Makefile
+include pytest.ini
+include tox.ini
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/README.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/README.rst b/shell/ext-py/sqlparse-0.1.19/README.rst
new file mode 100644
index 0000000..dbe2fe4
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/README.rst
@@ -0,0 +1,56 @@
+python-sqlparse - Parse SQL statements
+======================================
+
+sqlparse is a non-validating SQL parser module for Python.
+
+|buildstatus|_
+
+
+Install
+-------
+
+Run::
+
+  python setup.py install
+
+to install python-sqlparse on your system.
+
+python-sqlparse is compatible with Python 2 (>= 2.5) and Python 3 (>= 3.2).
+
+
+Run Tests
+---------
+
+To run the test suite run::
+
+  tox
+
+Note: you'll need tox installed, of course.
+
+
+Links
+-----
+
+Project Page
+  https://github.com/andialbrecht/sqlparse
+
+Documentation
+  http://readthedocs.org/docs/sqlparse/en/latest/
+
+Discussions
+  http://groups.google.com/group/sqlparse
+
+Issues/Bugs
+  https://github.com/andialbrecht/sqlparse/issues
+
+Online Demo
+  http://sqlformat.org
+
+
+python-sqlparse is licensed under the BSD license.
+
+Parts of the code are based on pygments written by Georg Brandl and others.
+pygments-Homepage: http://pygments.org/
+
+.. |buildstatus| image:: https://secure.travis-ci.org/andialbrecht/sqlparse.png?branch=master
+.. _buildstatus: http://travis-ci.org/#!/andialbrecht/sqlparse

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/TODO
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/TODO b/shell/ext-py/sqlparse-0.1.19/TODO
new file mode 100644
index 0000000..166df20
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/TODO
@@ -0,0 +1,7 @@
+* See
+  https://groups.google.com/d/msg/sqlparse/huz9lKXt0Lc/11ybIKPJWbUJ
+  for some interesting hints and suggestions.
+* Provide a function to replace tokens. See this thread: https://groups.google.com/d/msg/sqlparse/5xmBL2UKqX4/ZX9z_peve-AJ
+* Fix bugs on issue tracker.
+* Document filter stack and processing phases.
+* See KnownIssues http://code.google.com/p/python-sqlparse/wiki/KnownIssues

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/bin/sqlformat
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/bin/sqlformat b/shell/ext-py/sqlparse-0.1.19/bin/sqlformat
new file mode 100755
index 0000000..cecbed9
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/bin/sqlformat
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
+#
+# This module is part of python-sqlparse and is released under
+# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
+
+import optparse
+import os
+import sys
+
+import sqlparse
+from sqlparse.exceptions import SQLParseError
+
+
+_CASE_CHOICES = ['upper', 'lower', 'capitalize']
+
+
+parser = optparse.OptionParser(usage='%prog [OPTIONS] FILE, ...',
+                               version='%%prog %s' % sqlparse.__version__)
+parser.set_description(('Format FILE according to OPTIONS. Use "-" as FILE '
+                        'to read from stdin.'))
+parser.add_option('-v', '--verbose', dest='verbose', action='store_true')
+parser.add_option('-o', '--outfile', dest='outfile', metavar='FILE',
+                  help='write output to FILE (defaults to stdout)')
+group = parser.add_option_group('Formatting Options')
+group.add_option('-k', '--keywords', metavar='CHOICE',
+                 dest='keyword_case', choices=_CASE_CHOICES,
+                 help=('change case of keywords, CHOICE is one of %s'
+                       % ', '.join('"%s"' % x for x in _CASE_CHOICES)))
+group.add_option('-i', '--identifiers', metavar='CHOICE',
+                 dest='identifier_case', choices=_CASE_CHOICES,
+                 help=('change case of identifiers, CHOICE is one of %s'
+                       % ', '.join('"%s"' % x for x in _CASE_CHOICES)))
+group.add_option('-l', '--language', metavar='LANG',
+                 dest='output_format', choices=['python', 'php'],
+                 help=('output a snippet in programming language LANG, '
+                       'choices are "python", "php"'))
+group.add_option('--strip-comments', dest='strip_comments',
+                 action='store_true', default=False,
+                 help='remove comments')
+group.add_option('-r', '--reindent', dest='reindent',
+                 action='store_true', default=False,
+                 help='reindent statements')
+group.add_option('--indent_width', dest='indent_width', default=2,
+                 help='indentation width (defaults to 2 spaces)')
+
+_FORMATTING_GROUP = group
+
+
+def _error(msg, exit_=None):
+    """Print msg and optionally exit with return code exit_."""
+    sys.stderr.write('[ERROR] %s\n' % msg)
+    if exit_ is not None:
+        sys.exit(exit_)
+
+
+def _build_formatter_opts(options):
+    """Convert command line options to dictionary."""
+    d = {}
+    for option in _FORMATTING_GROUP.option_list:
+        d[option.dest] = getattr(options, option.dest)
+    return d
+
+
+def main():
+    options, args = parser.parse_args()
+    if options.verbose:
+        sys.stderr.write('Verbose mode\n')
+
+    if len(args) != 1:
+        _error('No input data.')
+        parser.print_usage()
+        sys.exit(1)
+
+    if '-' in args:  # read from stdin
+        data = sys.stdin.read()
+    else:
+        try:
+            data = ''.join(open(args[0]).readlines())
+        except (IOError, OSError):  # open() raises IOError on Python 2
+            err = sys.exc_info()[1]  # Python 2.5 compatibility
+            _error('Failed to read %s: %s' % (args[0], err), exit_=1)
+
+    if options.outfile:
+        try:
+            stream = open(options.outfile, 'w')
+        except (IOError, OSError):  # open() raises IOError on Python 2
+            err = sys.exc_info()[1]  # Python 2.5 compatibility
+            _error('Failed to open %s: %s' % (options.outfile, err), exit_=1)
+    else:
+        stream = sys.stdout
+
+    formatter_opts = _build_formatter_opts(options)
+    try:
+        formatter_opts = sqlparse.formatter.validate_options(formatter_opts)
+    except SQLParseError:
+        err = sys.exc_info()[1]  # Python 2.5 compatibility
+        _error('Invalid options: %s' % err, exit_=1)
+
+    s = sqlparse.format(data, **formatter_opts)
+    if sys.version_info < (3,):
+        s = s.encode('utf-8', 'replace')
+    stream.write(s)
+    stream.flush()
+
+
+if __name__ == '__main__':
+    main()

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/analyzing.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/analyzing.rst b/shell/ext-py/sqlparse-0.1.19/docs/source/analyzing.rst
new file mode 100644
index 0000000..5af5350
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/analyzing.rst
@@ -0,0 +1,64 @@
+.. _analyze:
+
+Analyzing the Parsed Statement
+==============================
+
+When the :meth:`~sqlparse.parse` function is called the returned value
+is a tree-ish representation of the analyzed statements. The returned
+objects can be used by applications to retrieve further information about
+the parsed SQL.
+
+
+Base Classes
+------------
+
+All returned objects inherit from these base classes.
+The :class:`~sqlparse.sql.Token` class represents a single token and
+:class:`~sqlparse.sql.TokenList` class is a group of tokens.
+The latter provides methods for inspecting its child tokens.
+
+.. autoclass:: sqlparse.sql.Token
+   :members:
+
+.. autoclass:: sqlparse.sql.TokenList
+   :members:
+
+
+SQL Representing Classes
+------------------------
+
+The following classes represent distinct parts of a SQL statement.
+
+.. autoclass:: sqlparse.sql.Statement
+   :members:
+
+.. autoclass:: sqlparse.sql.Comment
+   :members:
+
+.. autoclass:: sqlparse.sql.Identifier
+   :members:
+
+.. autoclass:: sqlparse.sql.IdentifierList
+   :members:
+
+.. autoclass:: sqlparse.sql.Where
+   :members:
+
+.. autoclass:: sqlparse.sql.Case
+   :members:
+
+.. autoclass:: sqlparse.sql.Parenthesis
+   :members:
+
+.. autoclass:: sqlparse.sql.If
+   :members:
+
+.. autoclass:: sqlparse.sql.For
+   :members:
+
+.. autoclass:: sqlparse.sql.Assignment
+   :members:
+
+.. autoclass:: sqlparse.sql.Comparison
+   :members:
+
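
As a quick illustration of the Token/TokenList API documented in analyzing.rst above, here is a minimal interactive sketch (not from the sqlparse docs themselves; it relies only on sqlparse.parse(), token_first(), flatten() and the sqlparse.sql.Where grouping that appear elsewhere in this patch):

    >>> import sqlparse
    >>> from sqlparse import sql
    >>> stmt = sqlparse.parse('select name from users where id = 1')[0]
    >>> stmt.token_first()                      # first non-whitespace child token
    <DML 'select' at 0x...>
    >>> isinstance(stmt.tokens[-1], sql.Where)  # the WHERE clause is grouped
    True
    >>> for tok in stmt.flatten():              # flatten() yields the leaf tokens
    ...     print tok.ttype, repr(tok.value)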

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/api.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/api.rst b/shell/ext-py/sqlparse-0.1.19/docs/source/api.rst
new file mode 100644
index 0000000..518a428
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/api.rst
@@ -0,0 +1,57 @@
+:mod:`sqlparse` -- Parse SQL statements
+=======================================
+
+.. module:: sqlparse
+   :synopsis: Parse SQL statements.
+
+The :mod:`sqlparse` module provides the following functions on module level.
+
+.. autofunction:: sqlparse.split
+
+.. autofunction:: sqlparse.format
+
+.. autofunction:: sqlparse.parse
+
+In most cases there's no need to set the `encoding` parameter. If
+`encoding` is not set, sqlparse assumes that the given SQL statement
+is encoded either in utf-8 or latin-1.
+
+
+.. _formatting:
+
+Formatting of SQL Statements
+----------------------------
+
+The :meth:`~sqlparse.format` function accepts the following keyword arguments.
+
+``keyword_case``
+  Changes how keywords are formatted. Allowed values are "upper", "lower"
+  and "capitalize".
+
+``identifier_case``
+  Changes how identifiers are formatted. Allowed values are "upper", "lower",
+  and "capitalize".
+
+``strip_comments``
+  If ``True`` comments are removed from the statements.
+
+``truncate_strings``
+  If ``truncate_strings`` is a positive integer, string literals longer than
+  the given value will be truncated.
+
+``truncate_char`` (default: "[...]")
+  If long string literals are truncated (see above) this value will be appended
+  to the truncated string.
+
+``reindent``
+  If ``True`` the indentations of the statements are changed.
+
+``indent_tabs``
+  If ``True`` tabs instead of spaces are used for indentation.
+
+``indent_width``
+  The width of the indentation, defaults to 2.
+
+``output_format``
+  If given the output is additionally formatted to be used as a variable
+  in a programming language. Allowed values are "python" and "php".
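
To make the formatting options above concrete, here is a small illustrative sketch (not from the docs themselves) that combines several of the keyword arguments listed for sqlparse.format() with the optional encoding parameter of sqlparse.parse():

    >>> import sqlparse
    >>> raw = 'select id, name from foo /* legacy */ where id in (select id from bar);'
    >>> print sqlparse.format(raw, keyword_case='upper', strip_comments=True,
    ...                       reindent=True, indent_width=4)
    >>> # encoding is only needed when the input is neither utf-8 nor latin-1
    >>> parsed = sqlparse.parse(raw, encoding='utf-8')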

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/changes.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/changes.rst b/shell/ext-py/sqlparse-0.1.19/docs/source/changes.rst
new file mode 100644
index 0000000..7c1b861
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/changes.rst
@@ -0,0 +1,13 @@
+.. _changes:
+
+Changes in python-sqlparse
+==========================
+
+Upcoming Deprecations
+---------------------
+
+* ``sqlparse.SQLParseError`` is deprecated (version 0.1.5), use
+  ``sqlparse.exceptions.SQLParseError`` instead.
+
+.. include:: ../../CHANGES
+

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/conf.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/conf.py b/shell/ext-py/sqlparse-0.1.19/docs/source/conf.py
new file mode 100644
index 0000000..5f7d34f
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/conf.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+#
+# python-sqlparse documentation build configuration file, created by
+# sphinx-quickstart on Thu Feb 26 08:19:28 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import datetime
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../'))
+
+import sqlparse
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage',
+              'sphinx.ext.autosummary']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'python-sqlparse'
+copyright = u'%s, Andi Albrecht' % datetime.date.today().strftime('%Y')
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = sqlparse.__version__
+# The full version, including alpha/beta/rc tags.
+release = sqlparse.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'tango'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+#html_theme = 'agogo'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = [os.path.abspath('../')]
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+#html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'python-sqlparsedoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'python-sqlparse.tex', ur'python-sqlparse Documentation',
+   ur'Andi Albrecht', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
+todo_include_todos = True

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/index.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/index.rst b/shell/ext-py/sqlparse-0.1.19/docs/source/index.rst
new file mode 100644
index 0000000..5eb76f7
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/index.rst
@@ -0,0 +1,61 @@
+.. python-sqlparse documentation master file, created by
+   sphinx-quickstart on Thu Feb 26 08:19:28 2009.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+python-sqlparse
+===============
+
+:mod:`sqlparse` is a non-validating SQL parser for Python.
+It provides support for parsing, splitting and formatting SQL statements.
+
+The module is compatible with Python 2 (>= 2.5) and Python 3 (>= 3.2)
+and released under the terms of the `New BSD license
+<http://www.opensource.org/licenses/bsd-license.php>`_.
+
+Visit the project page at https://github.com/andialbrecht/sqlparse for
+further information about this project.
+
+
+tl;dr
+-----
+
+.. code-block:: bash
+
+   $ pip install sqlparse
+   $ python
+   >>> import sqlparse
+   >>> print(sqlparse.format('select * from foo', reindent=True))
+   select *
+   from foo
+   >>> parsed = sqlparse.parse('select * from foo')[0]
+   >>> parsed.tokens
+   [<DML 'select' at 0x7f22c5e15368>, <Whitespace ' ' at 0x7f22c5e153b0>, <Wildcard '*' … ]
+   >>> 
+
+
+Contents
+--------
+
+.. toctree::
+   :maxdepth: 2
+
+   intro
+   api
+   analyzing
+   ui
+   changes
+   indices
+
+
+Resources
+---------
+
+Project page
+   https://github.com/andialbrecht/sqlparse
+
+Bug tracker
+   https://github.com/andialbrecht/sqlparse/issues
+
+Documentation
+   http://sqlparse.readthedocs.org/

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/indices.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/indices.rst b/shell/ext-py/sqlparse-0.1.19/docs/source/indices.rst
new file mode 100644
index 0000000..f74c5d8
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/indices.rst
@@ -0,0 +1,7 @@
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/intro.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/intro.rst b/shell/ext-py/sqlparse-0.1.19/docs/source/intro.rst
new file mode 100644
index 0000000..76d8fba
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/intro.rst
@@ -0,0 +1,143 @@
+Introduction
+============
+
+
+Download & Installation
+-----------------------
+
+The latest released version can be obtained from the `Python Package
+Index (PyPI) <http://pypi.python.org/pypi/sqlparse/>`_. To extract and
+install the module system-wide run
+
+.. code-block:: bash
+
+   $ tar xvfz python-sqlparse-VERSION.tar.gz
+   $ cd python-sqlparse/
+   $ sudo python setup.py install
+
+Alternatively you can install :mod:`sqlparse` using :command:`pip`:
+
+.. code-block:: bash
+
+   $ pip install sqlparse
+
+
+Getting Started
+---------------
+
+The :mod:`sqlparse` module provides three simple functions on module level
+to achieve some common tasks when working with SQL statements.
+This section shows some simple usage examples of these functions.
+
+Let's get started with splitting a string containing one or more SQL
+statements into a list of single statements using :meth:`~sqlparse.split`:
+
+.. code-block:: python
+
+  >>> import sqlparse
+  >>> sql = 'select * from foo; select * from bar;'
+  >>> sqlparse.split(sql)
+  [u'select * from foo; ', u'select * from bar;']
+
+The end of a statement is identified by the occurrence of a semicolon.
+Semicolons within certain SQL constructs like ``BEGIN ... END`` blocks
+are handled correctly by the splitting mechanism.
+
+SQL statements can be beautified by using the :meth:`~sqlparse.format` function.
+
+.. code-block:: python
+
+  >>> sql = 'select * from foo where id in (select id from bar);'
+  >>> print sqlparse.format(sql, reindent=True, keyword_case='upper')
+  SELECT *
+  FROM foo
+  WHERE id IN
+    (SELECT id
+     FROM bar);
+
+In this case all keywords in the given SQL are uppercased and the
+indentation is changed to make it more readable. Read :ref:`formatting` for
+a full reference of supported options given as keyword arguments
+to that function.
+
+Before proceeding with a closer look at the internal representation of
+SQL statements, you should be aware that this SQL parser is intentionally
+non-validating. It assumes that the given input is at least some kind
+of SQL and then it tries to analyze as much as possible without making
+too many assumptions about the concrete dialect or the actual statement.
+Ultimately it's up to the user of this API to interpret the results correctly.
+
+When using the :meth:`~sqlparse.parse` function a tuple of
+:class:`~sqlparse.sql.Statement` instances is returned:
+
+.. code-block:: python
+
+  >>> sql = 'select * from "someschema"."mytable" where id = 1'
+  >>> parsed = sqlparse.parse(sql)
+  >>> parsed
+  (<Statement 'select...' at 0x9ad08ec>,)
+
+Each item of the tuple is a single statement as identified by the above
+mentioned :meth:`~sqlparse.split` function. So let's grab the only element
+from that tuple and have a look at the ``tokens`` attribute.
+Sub-tokens are stored in this attribute.
+
+.. code-block:: python
+
+  >>> stmt = parsed[0]  # grab the Statement object
+  >>> stmt.tokens
+  (<DML 'select' at 0x9b63c34>,
+   <Whitespace ' ' at 0x9b63e8c>,
+   <Operator '*' at 0x9b63e64>,
+   <Whitespace ' ' at 0x9b63c5c>,
+   <Keyword 'from' at 0x9b63c84>,
+   <Whitespace ' ' at 0x9b63cd4>,
+   <Identifier '"somes...' at 0x9b5c62c>,
+   <Whitespace ' ' at 0x9b63f04>,
+   <Where 'where ...' at 0x9b5caac>)
+
+Each object can be converted back to a string at any time:
+
+.. code-block:: python
+
+   >>> unicode(stmt)  # str(stmt) for Python 3
+   u'select * from "someschema"."mytable" where id = 1'
+   >>> unicode(stmt.tokens[-1])  # or just the WHERE part
+   u'where id = 1'
+
+Details of the returned objects are described in :ref:`analyze`.
+
+
+Development & Contributing
+--------------------------
+
+To check out the latest sources of this module run
+
+.. code-block:: bash
+
+   $ git clone git://github.com/andialbrecht/sqlparse.git
+
+
+to clone the repository.
+
+:mod:`sqlparse` is currently tested under Python 2.5, 2.6, 2.7, 3.2 and
+pypy. Tests are automatically run on each commit and for each pull
+request on Travis: https://travis-ci.org/andialbrecht/sqlparse
+
+Make sure to run the test suite before sending a pull request by running
+
+.. code-block:: bash
+
+   $ tox
+
+It's ok if :command:`tox` doesn't find all interpreters listed
+above. Ideally a Python 2 and a Python 3 version should be tested
+locally.
+
+Please file bug reports and feature requests on the project site at
+https://github.com/andialbrecht/sqlparse/issues/new or, if you have
+code to contribute, upload it to http://codereview.appspot.com and
+add albrecht.andi@googlemail.com as reviewer.
+
+For more information about the review tool and how to use it visit
+its project page: http://code.google.com/p/rietveld.
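
Tying the Getting Started examples above together, a short illustrative sketch (not from the original docs) that splits a script, parses each piece and classifies it with Statement.get_type(), which the CHANGES entries in this patch refer to:

    >>> import sqlparse
    >>> raw = 'update foo set bar = 1; select * from foo;'
    >>> for statement in sqlparse.split(raw):       # one string per statement
    ...     stmt = sqlparse.parse(statement)[0]     # a Statement instance
    ...     print stmt.get_type(), repr(statement)  # e.g. UPDATE, SELECT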

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/source/ui.rst
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/source/ui.rst b/shell/ext-py/sqlparse-0.1.19/docs/source/ui.rst
new file mode 100644
index 0000000..264916e
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/source/ui.rst
@@ -0,0 +1,15 @@
+User Interfaces
+===============
+
+``sqlformat``
+  The ``sqlformat`` command line script is distributed with the module.
+  Run :command:`sqlformat --help` to list available options and for usage
+  hints.
+
+``sqlformat.appspot.com``
+  An example `Google App Engine <http://code.google.com/appengine/>`_
+  application that exposes the formatting features using a web front-end.
+  See http://sqlformat.appspot.com for details.
+  The source for this application is available from a source code check out
+  of the :mod:`sqlparse` module (see :file:`extras/appengine`).
+

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/docs/sqlformat.1
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/docs/sqlformat.1 b/shell/ext-py/sqlparse-0.1.19/docs/sqlformat.1
new file mode 100644
index 0000000..a42ea60
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/docs/sqlformat.1
@@ -0,0 +1,65 @@
+.\" Based on template /usr/share/man-db/examples/manpage.example provided by 
+.\" Tom Christiansen <tc...@jhereg.perl.com>.
+.TH SQLFORMAT "1" "December 2010" "python-sqlparse version: 0.1.2" "User Commands"
+.SH NAME
+sqlformat \- reformat SQL
+.SH SYNOPSIS
+.PP
+.B sqlformat
+[
+.I "OPTION"
+] ... [
+.I "FILE"
+] ...
+.SH DESCRIPTION
+.\" Putting a newline after each sentence can generate better output.
+The `sqlformat' command-line tool can be used to reformat SQL files according to
+the specified options or prepare a snippet in some programming language (only
+Python and PHP currently supported).
+Use "-" for
+.I FILE
+to read from stdin.
+.SH OPTIONS
+.TP
+\fB\-i\fR \fICHOICE\fR|\fB\-\-identifiers\fR=\fIFORMAT\fR
+Change case of identifiers.
+.I FORMAT
+is one of "upper", "lower", "capitalize".
+.TP
+\fB\-k\fR \fICHOICE\fR|\fB\-\-keywords\fR=\fIFORMAT\fR
+Change case of keywords.
+.I FORMAT
+is one of "upper", "lower", "capitalize".
+.TP
+\fB\-l\fR \fICHOICE\fR|\fB\-\-language\fR=\fILANG\fR
+Output a snippet in programming language LANG.
+.I LANG
+can be "python", "php".
+.TP
+\fB\-o\fR \fIFILE\fR|\fB\-\-outfile\fR=\fIFILE\fR
+Write output to
+.I FILE
+(defaults to stdout).
+.TP
+.BR \-r | \-\-reindent
+Reindent statements.
+.TP
+\fB\-\-indent_width\fR=\fIINDENT_WIDTH\fR
+Set indent width to
+.IR INDENT_WIDTH .
+Default is 2 spaces.
+.TP
+\fB\-\-strip\-comments\fR
+Remove comments.
+.TP
+.BR \-h | \-\-help
+Print a short help message and exit.
+All subsequent options are ignored.
+.TP
+.BR \-\-verbose
+Verbose output.
+.TP
+.BR \-\-version
+Print program's version number and exit.
+.SH AUTHORS
+This man page was written by Andriy Senkovych <jo...@itblog.org.ua>
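
For illustration, two sample invocations built only from the options documented above; the expected output of the first mirrors the reindent example in docs/source/intro.rst from this patch, and the file names in the second are placeholders:

    $ echo 'select * from foo where id in (select id from bar);' | sqlformat --reindent --keywords upper -
    SELECT *
    FROM foo
    WHERE id IN
      (SELECT id
       FROM bar);
    $ sqlformat --reindent --strip-comments -o formatted.sql query.sql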

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/pytest.ini
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/pytest.ini b/shell/ext-py/sqlparse-0.1.19/pytest.ini
new file mode 100644
index 0000000..a2cbd90
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+pep8ignore = 
+  extras/* ALL
+  examples/* ALL
+  docs/* ALL
+  * E125 E127

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/setup.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/setup.py b/shell/ext-py/sqlparse-0.1.19/setup.py
new file mode 100644
index 0000000..2c6dce8
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/setup.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
+#
+# This setup script is part of python-sqlparse and is released under
+# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
+
+import re
+import sys
+
+try:
+    from setuptools import setup, find_packages
+    packages = find_packages(exclude=('tests',))
+except ImportError:
+    if sys.version_info[0] == 3:
+        raise RuntimeError('distribute is required to install this package.')
+    from distutils.core import setup
+    packages = ['sqlparse', 'sqlparse.engine']
+
+
+def get_version():
+    """parse __init__.py for version number instead of importing the file
+
+    see http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
+    """
+    VERSIONFILE='sqlparse/__init__.py'
+    verstrline = open(VERSIONFILE, "rt").read()
+    VSRE = r'^__version__ = [\'"]([^\'"]*)[\'"]'
+    mo = re.search(VSRE, verstrline, re.M)
+    if mo:
+        return mo.group(1)
+    else:
+        raise RuntimeError('Unable to find version string in %s.'
+                           % (VERSIONFILE,))
+
+
+LONG_DESCRIPTION = """
+``sqlparse`` is a non-validating SQL parser module.
+It provides support for parsing, splitting and formatting SQL statements.
+
+Visit the `project page <https://github.com/andialbrecht/sqlparse>`_ for
+additional information and documentation.
+
+**Example Usage**
+
+
+Splitting SQL statements::
+
+   >>> import sqlparse
+   >>> sqlparse.split('select * from foo; select * from bar;')
+   [u'select * from foo; ', u'select * from bar;']
+
+
+Formatting statements::
+
+   >>> sql = 'select * from foo where id in (select id from bar);'
+   >>> print sqlparse.format(sql, reindent=True, keyword_case='upper')
+   SELECT *
+   FROM foo
+   WHERE id IN
+     (SELECT id
+      FROM bar);
+
+
+Parsing::
+
+   >>> sql = 'select * from someschema.mytable where id = 1'
+   >>> res = sqlparse.parse(sql)
+   >>> res
+   (<Statement 'select...' at 0x9ad08ec>,)
+   >>> stmt = res[0]
+   >>> unicode(stmt)  # converting it back to unicode
+   u'select * from someschema.mytable where id = 1'
+   >>> # This is what the internal representation looks like:
+   >>> stmt.tokens
+   (<DML 'select' at 0x9b63c34>,
+    <Whitespace ' ' at 0x9b63e8c>,
+    <Operator '*' at 0x9b63e64>,
+    <Whitespace ' ' at 0x9b63c5c>,
+    <Keyword 'from' at 0x9b63c84>,
+    <Whitespace ' ' at 0x9b63cd4>,
+    <Identifier 'somes...' at 0x9b5c62c>,
+    <Whitespace ' ' at 0x9b63f04>,
+    <Where 'where ...' at 0x9b5caac>)
+
+"""
+
+VERSION = get_version()
+
+
+kwargs = {}
+if sys.version_info[0] == 3:
+    kwargs['use_2to3'] = True
+
+
+setup(
+    name='sqlparse',
+    version=VERSION,
+    packages=packages,
+    description='Non-validating SQL parser',
+    author='Andi Albrecht',
+    author_email='albrecht.andi@gmail.com',
+    long_description=LONG_DESCRIPTION,
+    license='BSD',
+    url='https://github.com/andialbrecht/sqlparse',
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: BSD License',
+        'Operating System :: OS Independent',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.4',
+        'Programming Language :: Python :: 2.5',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.2',
+        'Programming Language :: Python :: 3.3',
+        'Topic :: Database',
+        'Topic :: Software Development'
+    ],
+    scripts=['bin/sqlformat'],
+    **kwargs
+)
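
setup.py reads the release number straight out of sqlparse/__init__.py with a
regular expression instead of importing the package, so installation does not
require the package to be importable first. A self-contained sketch of the same
pattern (the sample string below is hypothetical; the real script reads the file
from disk):

    import re

    sample = '__version__ = "0.1.19"\n'
    match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', sample, re.M)
    print(match.group(1) if match else 'version string not found')  # -> 0.1.19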

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/sqlparse/__init__.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/sqlparse/__init__.py b/shell/ext-py/sqlparse-0.1.19/sqlparse/__init__.py
new file mode 100644
index 0000000..238bfb6
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/sqlparse/__init__.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
+#
+# This module is part of python-sqlparse and is released under
+# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
+
+"""Parse SQL statements."""
+
+
+__version__ = '0.1.19'
+
+
+# Setup namespace
+from sqlparse import engine
+from sqlparse import filters
+from sqlparse import formatter
+
+# Deprecated in 0.1.5. Will be removed in 0.2.0
+from sqlparse.exceptions import SQLParseError
+
+
+def parse(sql, encoding=None):
+    """Parse sql and return a list of statements.
+
+    :param sql: A string containing one or more SQL statements.
+    :param encoding: The encoding of the statement (optional).
+    :returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
+    """
+    return tuple(parsestream(sql, encoding))
+
+
+def parsestream(stream, encoding=None):
+    """Parses sql statements from file-like object.
+
+    :param stream: A file-like object.
+    :param encoding: The encoding of the stream contents (optional).
+    :returns: A generator of :class:`~sqlparse.sql.Statement` instances.
+    """
+    stack = engine.FilterStack()
+    stack.full_analyze()
+    return stack.run(stream, encoding)
+
+
+def format(sql, **options):
+    """Format *sql* according to *options*.
+
+    Available options are documented in :ref:`formatting`.
+
+    In addition to the formatting options this function accepts the
+    keyword "encoding" which determines the encoding of the statement.
+
+    :returns: The formatted SQL statement as a string.
+    """
+    encoding = options.pop('encoding', None)
+    stack = engine.FilterStack()
+    options = formatter.validate_options(options)
+    stack = formatter.build_filter_stack(stack, options)
+    stack.postprocess.append(filters.SerializerUnicode())
+    return ''.join(stack.run(sql, encoding))
+
+
+def split(sql, encoding=None):
+    """Split *sql* into single statements.
+
+    :param sql: A string containing one or more SQL statements.
+    :param encoding: The encoding of the statement (optional).
+    :returns: A list of strings.
+    """
+    stack = engine.FilterStack()
+    stack.split_statements = True
+    return [unicode(stmt).strip() for stmt in stack.run(sql, encoding)]
+
+
+from sqlparse.engine.filter import StatementFilter
+
+
+def split2(stream):
+    splitter = StatementFilter()
+    return list(splitter.process(None, stream))
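
All four public entry points (parse, parsestream, format, split) build a
FilterStack internally. A minimal usage sketch, assuming the package is importable;
the return shapes follow what the docstrings above promise:

    import sqlparse

    # split() -> list of statement strings
    print(sqlparse.split('select * from foo; select * from bar;'))

    # parse() -> tuple of sqlparse.sql.Statement objects with a .tokens tree
    stmt = sqlparse.parse('select * from foo where id = 1')[0]
    print(len(stmt.tokens))

    # format() -> formatted SQL string, options validated by the formatter module
    print(sqlparse.format('select * from foo', keyword_case='upper'))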

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/__init__.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/__init__.py b/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/__init__.py
new file mode 100644
index 0000000..62c82b8
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/__init__.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
+#
+# This module is part of python-sqlparse and is released under
+# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
+
+"""filter"""
+
+from sqlparse import lexer
+from sqlparse.engine import grouping
+from sqlparse.engine.filter import StatementFilter
+
+# XXX remove this when cleanup is complete
+Filter = object
+
+
+class FilterStack(object):
+
+    def __init__(self):
+        self.preprocess = []
+        self.stmtprocess = []
+        self.postprocess = []
+        self.split_statements = False
+        self._grouping = False
+
+    def _flatten(self, stream):
+        for token in stream:
+            if token.is_group():
+                for t in self._flatten(token.tokens):
+                    yield t
+            else:
+                yield token
+
+    def enable_grouping(self):
+        self._grouping = True
+
+    def full_analyze(self):
+        self.enable_grouping()
+
+    def run(self, sql, encoding=None):
+        stream = lexer.tokenize(sql, encoding)
+        # Process token stream
+        if self.preprocess:
+            for filter_ in self.preprocess:
+                stream = filter_.process(self, stream)
+
+        if (self.stmtprocess or self.postprocess or self.split_statements
+            or self._grouping):
+            splitter = StatementFilter()
+            stream = splitter.process(self, stream)
+
+        if self._grouping:
+
+            def _group(stream):
+                for stmt in stream:
+                    grouping.group(stmt)
+                    yield stmt
+            stream = _group(stream)
+
+        if self.stmtprocess:
+
+            def _run1(stream):
+                ret = []
+                for stmt in stream:
+                    for filter_ in self.stmtprocess:
+                        filter_.process(self, stmt)
+                    ret.append(stmt)
+                return ret
+            stream = _run1(stream)
+
+        if self.postprocess:
+
+            def _run2(stream):
+                for stmt in stream:
+                    stmt.tokens = list(self._flatten(stmt.tokens))
+                    for filter_ in self.postprocess:
+                        stmt = filter_.process(self, stmt)
+                    yield stmt
+            stream = _run2(stream)
+
+        return stream
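
FilterStack is the pipeline behind the functions above: tokenize, optionally split
into statements, optionally group, then run statement-level and post-process
filters. A rough sketch of driving it directly, mirroring what split() and
parsestream() do:

    from sqlparse.engine import FilterStack

    stack = FilterStack()
    stack.split_statements = True   # route tokens through StatementFilter
    stack.enable_grouping()         # run grouping.group() on each statement

    for stmt in stack.run('select a, b from t; select c from u;'):
        print(repr(stmt), len(stmt.tokens))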

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/filter.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/filter.py b/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/filter.py
new file mode 100644
index 0000000..f7dd264
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/filter.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+
+from sqlparse.sql import Statement, Token
+from sqlparse import tokens as T
+
+
+class StatementFilter:
+    "Filter that split stream at individual statements"
+
+    def __init__(self):
+        self._in_declare = False
+        self._in_dbldollar = False
+        self._is_create = False
+        self._begin_depth = 0
+
+    def _reset(self):
+        "Set the filter attributes to its default values"
+        self._in_declare = False
+        self._in_dbldollar = False
+        self._is_create = False
+        self._begin_depth = 0
+
+    def _change_splitlevel(self, ttype, value):
+        "Get the new split level (increase, decrease or remain equal)"
+        # PostgreSQL
+        if (ttype == T.Name.Builtin
+            and value.startswith('$') and value.endswith('$')):
+            if self._in_dbldollar:
+                self._in_dbldollar = False
+                return -1
+            else:
+                self._in_dbldollar = True
+                return 1
+        elif self._in_dbldollar:
+            return 0
+
+        # ANSI
+        if ttype not in T.Keyword:
+            return 0
+
+        unified = value.upper()
+
+        if unified == 'DECLARE' and self._is_create and self._begin_depth == 0:
+            self._in_declare = True
+            return 1
+
+        if unified == 'BEGIN':
+            self._begin_depth += 1
+            if self._in_declare or self._is_create:
+                # FIXME(andi): This makes no sense.
+                return 1
+            return 0
+
+        if unified in ('END IF', 'END FOR'):
+            return -1
+
+        if unified == 'END':
+            # Should this respect a preceding BEGIN?
+            # In CASE ... WHEN ... END this results in a split level -1.
+            self._begin_depth = max(0, self._begin_depth - 1)
+            return -1
+
+        if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
+            self._is_create = True
+            return 0
+
+        if (unified in ('IF', 'FOR')
+            and self._is_create and self._begin_depth > 0):
+            return 1
+
+        # Default
+        return 0
+
+    def process(self, stack, stream):
+        "Process the stream"
+        consume_ws = False
+        splitlevel = 0
+        stmt = None
+        stmt_tokens = []
+
+        # Run over all stream tokens
+        for ttype, value in stream:
+            # Yield the finished statement once a non-whitespace, non-comment token arrives
+            if consume_ws and ttype not in (T.Whitespace, T.Comment.Single):
+                stmt.tokens = stmt_tokens
+                yield stmt
+
+                # Reset filter and prepare to process next statement
+                self._reset()
+                consume_ws = False
+                splitlevel = 0
+                stmt = None
+
+            # Create a new statement if we are not currently in one of them
+            if stmt is None:
+                stmt = Statement()
+                stmt_tokens = []
+
+            # Change current split level (increase, decrease or remain equal)
+            splitlevel += self._change_splitlevel(ttype, value)
+
+            # Append the token to the current statement
+            stmt_tokens.append(Token(ttype, value))
+
+            # Check if we get the end of a statement
+            if splitlevel <= 0 and ttype is T.Punctuation and value == ';':
+                consume_ws = True
+
+        # Yield pending statement (if any)
+        if stmt is not None:
+            stmt.tokens = stmt_tokens
+            yield stmt
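
The split level computed above is what keeps semicolons inside BEGIN ... END (or
dollar-quoted) bodies from terminating a statement: CREATE marks the statement,
BEGIN raises the level, and only a ';' seen at level 0 ends a statement. A small
sketch of the visible effect through the public split() wrapper (exact whitespace
in the results may differ):

    import sqlparse

    body = ("CREATE FUNCTION bump() BEGIN "
            "UPDATE t SET n = n + 1; "
            "END; "
            "SELECT n FROM t;")

    # The inner ';' should not end the first statement; expect two statements.
    for s in sqlparse.split(body):
        print(repr(s))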

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/grouping.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/grouping.py b/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/grouping.py
new file mode 100644
index 0000000..bb1f0b0
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/sqlparse/engine/grouping.py
@@ -0,0 +1,461 @@
+# -*- coding: utf-8 -*-
+
+import itertools
+
+from sqlparse import sql
+from sqlparse import tokens as T
+
+try:
+    next
+except NameError:  # Python < 2.6
+    next = lambda i: i.next()
+
+
+def _group_left_right(tlist, ttype, value, cls,
+                      check_right=lambda t: True,
+                      check_left=lambda t: True,
+                      include_semicolon=False):
+    [_group_left_right(sgroup, ttype, value, cls, check_right, check_left,
+                       include_semicolon) for sgroup in tlist.get_sublists()
+     if not isinstance(sgroup, cls)]
+    idx = 0
+    token = tlist.token_next_match(idx, ttype, value)
+    while token:
+        right = tlist.token_next(tlist.token_index(token))
+        left = tlist.token_prev(tlist.token_index(token))
+        if right is None or not check_right(right):
+            token = tlist.token_next_match(tlist.token_index(token) + 1,
+                                           ttype, value)
+        elif left is None or not check_left(left):
+            token = tlist.token_next_match(tlist.token_index(token) + 1,
+                                           ttype, value)
+        else:
+            if include_semicolon:
+                sright = tlist.token_next_match(tlist.token_index(right),
+                                                T.Punctuation, ';')
+                if sright is not None:
+                    # only overwrite "right" if a semicolon is actually
+                    # present.
+                    right = sright
+            tokens = tlist.tokens_between(left, right)[1:]
+            if not isinstance(left, cls):
+                new = cls([left])
+                new_idx = tlist.token_index(left)
+                tlist.tokens.remove(left)
+                tlist.tokens.insert(new_idx, new)
+                left = new
+            left.tokens.extend(tokens)
+            for t in tokens:
+                tlist.tokens.remove(t)
+            token = tlist.token_next_match(tlist.token_index(left) + 1,
+                                           ttype, value)
+
+
+def _find_matching(idx, tlist, start_ttype, start_value, end_ttype, end_value):
+    depth = 1
+    for tok in tlist.tokens[idx:]:
+        if tok.match(start_ttype, start_value):
+            depth += 1
+        elif tok.match(end_ttype, end_value):
+            depth -= 1
+            if depth == 1:
+                return tok
+    return None
+
+
+def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
+                    cls, include_semicolon=False, recurse=False):
+
+    [_group_matching(sgroup, start_ttype, start_value, end_ttype, end_value,
+                     cls, include_semicolon) for sgroup in tlist.get_sublists()
+     if recurse]
+    if isinstance(tlist, cls):
+        idx = 1
+    else:
+        idx = 0
+    token = tlist.token_next_match(idx, start_ttype, start_value)
+    while token:
+        tidx = tlist.token_index(token)
+        end = _find_matching(tidx, tlist, start_ttype, start_value,
+                             end_ttype, end_value)
+        if end is None:
+            idx = tidx + 1
+        else:
+            if include_semicolon:
+                next_ = tlist.token_next(tlist.token_index(end))
+                if next_ and next_.match(T.Punctuation, ';'):
+                    end = next_
+            group = tlist.group_tokens(cls, tlist.tokens_between(token, end))
+            _group_matching(group, start_ttype, start_value,
+                            end_ttype, end_value, cls, include_semicolon)
+            idx = tlist.token_index(group) + 1
+        token = tlist.token_next_match(idx, start_ttype, start_value)
+
+
+def group_if(tlist):
+    _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', sql.If, True)
+
+
+def group_for(tlist):
+    _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP',
+                    sql.For, True)
+
+
+def group_foreach(tlist):
+    _group_matching(tlist, T.Keyword, 'FOREACH', T.Keyword, 'END LOOP',
+                    sql.For, True)
+
+
+def group_begin(tlist):
+    _group_matching(tlist, T.Keyword, 'BEGIN', T.Keyword, 'END',
+                    sql.Begin, True)
+
+
+def group_as(tlist):
+
+    def _right_valid(token):
+        # Currently limited to DML/DDL. Maybe more non-SQL reserved
+        # keywords should appear here (see issue8).
+        return not token.ttype in (T.DML, T.DDL)
+
+    def _left_valid(token):
+        if token.ttype is T.Keyword and token.value in ('NULL',):
+            return True
+        return token.ttype is not T.Keyword
+
+    _group_left_right(tlist, T.Keyword, 'AS', sql.Identifier,
+                      check_right=_right_valid,
+                      check_left=_left_valid)
+
+
+def group_assignment(tlist):
+    _group_left_right(tlist, T.Assignment, ':=', sql.Assignment,
+                      include_semicolon=True)
+
+
+def group_comparison(tlist):
+
+    def _parts_valid(token):
+        return (token.ttype in (T.String.Symbol, T.String.Single,
+                                T.Name, T.Number, T.Number.Float,
+                                T.Number.Integer, T.Literal,
+                                T.Literal.Number.Integer, T.Name.Placeholder)
+                or isinstance(token, (sql.Identifier, sql.Parenthesis))
+                or (token.ttype is T.Keyword
+                    and token.value.upper() in ['NULL', ]))
+    _group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison,
+                      check_left=_parts_valid, check_right=_parts_valid)
+
+
+def group_case(tlist):
+    _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', sql.Case,
+                    include_semicolon=True, recurse=True)
+
+
+def group_identifier(tlist):
+    def _consume_cycle(tl, i):
+        # TODO: Usage of the Wildcard token is ambiguous here.
+        x = itertools.cycle((
+            lambda y: (y.match(T.Punctuation, '.')
+                       or y.ttype in (T.Operator,
+                                      T.Wildcard,
+                                      T.Name)
+                       or isinstance(y, sql.SquareBrackets)),
+            lambda y: (y.ttype in (T.String.Symbol,
+                                   T.Name,
+                                   T.Wildcard,
+                                   T.Literal.String.Single,
+                                   T.Literal.Number.Integer,
+                                   T.Literal.Number.Float)
+                       or isinstance(y, (sql.Parenthesis,
+                                         sql.SquareBrackets,
+                                         sql.Function)))))
+        for t in tl.tokens[i:]:
+            # Don't take whitespaces into account.
+            if t.ttype is T.Whitespace:
+                yield t
+                continue
+            if next(x)(t):
+                yield t
+            else:
+                if isinstance(t, sql.Comment) and t.is_multiline():
+                    yield t
+                return
+
+    def _next_token(tl, i):
+        # Choose the next token. If two candidates are found, the one
+        # that appears first in the list is returned.
+        t1 = tl.token_next_by_type(
+            i, (T.String.Symbol, T.Name, T.Literal.Number.Integer,
+                T.Literal.Number.Float))
+
+        i1 = tl.token_index(t1, start=i) if t1 else None
+        t2_end = None if i1 is None else i1 + 1
+        t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis), end=t2_end)
+
+        if t1 and t2:
+            i2 = tl.token_index(t2, start=i)
+            if i1 > i2:
+                return t2
+            else:
+                return t1
+        elif t1:
+            return t1
+        else:
+            return t2
+
+    # bottom up approach: group subgroups first
+    [group_identifier(sgroup) for sgroup in tlist.get_sublists()
+     if not isinstance(sgroup, sql.Identifier)]
+
+    # real processing
+    idx = 0
+    token = _next_token(tlist, idx)
+    while token:
+        identifier_tokens = [token] + list(
+            _consume_cycle(tlist,
+                           tlist.token_index(token, start=idx) + 1))
+        # remove trailing whitespace
+        if identifier_tokens and identifier_tokens[-1].ttype is T.Whitespace:
+            identifier_tokens = identifier_tokens[:-1]
+        if not (len(identifier_tokens) == 1
+                and (isinstance(identifier_tokens[0], (sql.Function, sql.Parenthesis))
+                     or identifier_tokens[0].ttype in (T.Literal.Number.Integer,
+                                                       T.Literal.Number.Float))):
+            group = tlist.group_tokens(sql.Identifier, identifier_tokens)
+            idx = tlist.token_index(group, start=idx) + 1
+        else:
+            idx += 1
+        token = _next_token(tlist, idx)
+
+
+def group_identifier_list(tlist):
+    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
+     if not isinstance(sgroup, sql.IdentifierList)]
+    # Allowed list items
+    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
+                                            sql.Case)),
+                   lambda t: t.is_whitespace(),
+                   lambda t: t.ttype == T.Name,
+                   lambda t: t.ttype == T.Wildcard,
+                   lambda t: t.match(T.Keyword, 'null'),
+                   lambda t: t.match(T.Keyword, 'role'),
+                   lambda t: t.ttype == T.Number.Integer,
+                   lambda t: t.ttype == T.String.Single,
+                   lambda t: t.ttype == T.Name.Placeholder,
+                   lambda t: t.ttype == T.Keyword,
+                   lambda t: isinstance(t, sql.Comparison),
+                   lambda t: isinstance(t, sql.Comment),
+                   lambda t: t.ttype == T.Comment.Multiline,
+                   ]
+    tcomma = tlist.token_next_match(0, T.Punctuation, ',')
+    start = None
+    while tcomma is not None:
+        # Look up the current index of tcomma within tlist
+        idx = tlist.token_index(tcomma)
+        before = tlist.token_prev(idx)
+        after = tlist.token_next(idx)
+        # Check if the tokens around tcomma belong to a list
+        bpassed = apassed = False
+        for func in fend1_funcs:
+            if before is not None and func(before):
+                bpassed = True
+            if after is not None and func(after):
+                apassed = True
+        if not bpassed or not apassed:
+            # Something's wrong here, skip ahead to next ","
+            start = None
+            tcomma = tlist.token_next_match(idx + 1,
+                                            T.Punctuation, ',')
+        else:
+            if start is None:
+                start = before
+            after_idx = tlist.token_index(after, start=idx)
+            next_ = tlist.token_next(after_idx)
+            if next_ is None or not next_.match(T.Punctuation, ','):
+                # Reached the end of the list
+                tokens = tlist.tokens_between(start, after)
+                group = tlist.group_tokens(sql.IdentifierList, tokens)
+                start = None
+                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
+                                                T.Punctuation, ',')
+            else:
+                tcomma = next_
+
+
+def group_brackets(tlist):
+    """Group parentheses () or square brackets []
+
+        This is just like _group_matching, but complicated by the fact that
+        round brackets can contain square bracket groups and vice versa
+    """
+
+    if isinstance(tlist, (sql.Parenthesis, sql.SquareBrackets)):
+        idx = 1
+    else:
+        idx = 0
+
+    # Find the first opening bracket
+    token = tlist.token_next_match(idx, T.Punctuation, ['(', '['])
+
+    while token:
+        start_val = token.value  # either '(' or '['
+        if start_val == '(':
+            end_val = ')'
+            group_class = sql.Parenthesis
+        else:
+            end_val = ']'
+            group_class = sql.SquareBrackets
+
+        tidx = tlist.token_index(token)
+
+        # Find the corresponding closing bracket
+        end = _find_matching(tidx, tlist, T.Punctuation, start_val,
+                             T.Punctuation, end_val)
+
+        if end is None:
+            idx = tidx + 1
+        else:
+            group = tlist.group_tokens(group_class,
+                                       tlist.tokens_between(token, end))
+
+            # Check for nested bracket groups within this group
+            group_brackets(group)
+            idx = tlist.token_index(group) + 1
+
+        # Find the next opening bracket
+        token = tlist.token_next_match(idx, T.Punctuation, ['(', '['])
+
+
+def group_comments(tlist):
+    [group_comments(sgroup) for sgroup in tlist.get_sublists()
+     if not isinstance(sgroup, sql.Comment)]
+    idx = 0
+    token = tlist.token_next_by_type(idx, T.Comment)
+    while token:
+        tidx = tlist.token_index(token)
+        end = tlist.token_not_matching(tidx + 1,
+                                       [lambda t: t.ttype in T.Comment,
+                                        lambda t: t.is_whitespace()])
+        if end is None:
+            idx = tidx + 1
+        else:
+            eidx = tlist.token_index(end)
+            grp_tokens = tlist.tokens_between(token,
+                                              tlist.token_prev(eidx, False))
+            group = tlist.group_tokens(sql.Comment, grp_tokens)
+            idx = tlist.token_index(group)
+        token = tlist.token_next_by_type(idx, T.Comment)
+
+
+def group_where(tlist):
+    [group_where(sgroup) for sgroup in tlist.get_sublists()
+     if not isinstance(sgroup, sql.Where)]
+    idx = 0
+    token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
+    stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING')
+    while token:
+        tidx = tlist.token_index(token)
+        end = tlist.token_next_match(tidx + 1, T.Keyword, stopwords)
+        if end is None:
+            end = tlist._groupable_tokens[-1]
+        else:
+            end = tlist.tokens[tlist.token_index(end) - 1]
+        group = tlist.group_tokens(sql.Where,
+                                   tlist.tokens_between(token, end),
+                                   ignore_ws=True)
+        idx = tlist.token_index(group)
+        token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
+
+
+def group_aliased(tlist):
+    clss = (sql.Identifier, sql.Function, sql.Case)
+    [group_aliased(sgroup) for sgroup in tlist.get_sublists()
+     if not isinstance(sgroup, clss)]
+    idx = 0
+    token = tlist.token_next_by_instance(idx, clss)
+    while token:
+        next_ = tlist.token_next(tlist.token_index(token))
+        if next_ is not None and isinstance(next_, clss):
+            if not next_.value.upper().startswith('VARCHAR'):
+                grp = tlist.tokens_between(token, next_)[1:]
+                token.tokens.extend(grp)
+                for t in grp:
+                    tlist.tokens.remove(t)
+        idx = tlist.token_index(token) + 1
+        token = tlist.token_next_by_instance(idx, clss)
+
+
+def group_typecasts(tlist):
+    _group_left_right(tlist, T.Punctuation, '::', sql.Identifier)
+
+
+def group_functions(tlist):
+    [group_functions(sgroup) for sgroup in tlist.get_sublists()
+     if not isinstance(sgroup, sql.Function)]
+    idx = 0
+    token = tlist.token_next_by_type(idx, T.Name)
+    while token:
+        next_ = tlist.token_next(token)
+        if not isinstance(next_, sql.Parenthesis):
+            idx = tlist.token_index(token) + 1
+        else:
+            func = tlist.group_tokens(sql.Function,
+                                      tlist.tokens_between(token, next_))
+            idx = tlist.token_index(func) + 1
+        token = tlist.token_next_by_type(idx, T.Name)
+
+
+def group_order(tlist):
+    idx = 0
+    token = tlist.token_next_by_type(idx, T.Keyword.Order)
+    while token:
+        prev = tlist.token_prev(token)
+        if isinstance(prev, sql.Identifier):
+            ido = tlist.group_tokens(sql.Identifier,
+                                     tlist.tokens_between(prev, token))
+            idx = tlist.token_index(ido) + 1
+        else:
+            idx = tlist.token_index(token) + 1
+        token = tlist.token_next_by_type(idx, T.Keyword.Order)
+
+
+def align_comments(tlist):
+    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
+    idx = 0
+    token = tlist.token_next_by_instance(idx, sql.Comment)
+    while token:
+        before = tlist.token_prev(tlist.token_index(token))
+        if isinstance(before, sql.TokenList):
+            grp = tlist.tokens_between(before, token)[1:]
+            before.tokens.extend(grp)
+            for t in grp:
+                tlist.tokens.remove(t)
+            idx = tlist.token_index(before) + 1
+        else:
+            idx = tlist.token_index(token) + 1
+        token = tlist.token_next_by_instance(idx, sql.Comment)
+
+
+def group(tlist):
+    for func in [
+            group_comments,
+            group_brackets,
+            group_functions,
+            group_where,
+            group_case,
+            group_identifier,
+            group_order,
+            group_typecasts,
+            group_as,
+            group_aliased,
+            group_assignment,
+            group_comparison,
+            align_comments,
+            group_identifier_list,
+            group_if,
+            group_for,
+            group_foreach,
+            group_begin,
+            ]:
+        func(tlist)
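
group() runs the passes above in order, turning the flat token stream into nested
nodes (Comment, Parenthesis, Function, Where, Identifier, IdentifierList, ...).
parse() enables this via full_analyze(), so the effect can be inspected on any
parsed statement; a short sketch using the class names defined in sqlparse.sql:

    import sqlparse
    from sqlparse import sql

    stmt = sqlparse.parse('select f(x) as y from s.t where id in (1, 2)')[0]

    # Top-level children after grouping: keywords stay plain tokens, while
    # aliases, function calls and the WHERE clause become TokenList subclasses.
    for tok in stmt.tokens:
        print(type(tok).__name__, tok.ttype)

    print(any(isinstance(t, sql.Where) for t in stmt.tokens))  # expected: True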

http://git-wip-us.apache.org/repos/asf/impala/blob/49413d9c/shell/ext-py/sqlparse-0.1.19/sqlparse/exceptions.py
----------------------------------------------------------------------
diff --git a/shell/ext-py/sqlparse-0.1.19/sqlparse/exceptions.py b/shell/ext-py/sqlparse-0.1.19/sqlparse/exceptions.py
new file mode 100644
index 0000000..ec25afa
--- /dev/null
+++ b/shell/ext-py/sqlparse-0.1.19/sqlparse/exceptions.py
@@ -0,0 +1,10 @@
+# Copyright (C) 2012 Andi Albrecht, albrecht.andi@gmail.com
+#
+# This module is part of python-sqlparse and is released under
+# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
+
+"""Exceptions used in this package."""
+
+
+class SQLParseError(Exception):
+    """Base class for exceptions in this module."""